001//
002// This file is auto-generated. Please don't modify it!
003//
004package org.opencv.imgproc;
005
006import java.util.ArrayList;
007import java.util.List;
008import org.opencv.core.Mat;
009import org.opencv.core.MatOfFloat;
010import org.opencv.core.MatOfInt;
011import org.opencv.core.MatOfInt4;
012import org.opencv.core.MatOfPoint;
013import org.opencv.core.MatOfPoint2f;
014import org.opencv.core.Point;
015import org.opencv.core.Rect;
016import org.opencv.core.RotatedRect;
017import org.opencv.core.Scalar;
018import org.opencv.core.Size;
019import org.opencv.core.TermCriteria;
020import org.opencv.imgproc.CLAHE;
021import org.opencv.imgproc.GeneralizedHoughBallard;
022import org.opencv.imgproc.GeneralizedHoughGuil;
023import org.opencv.imgproc.LineSegmentDetector;
024import org.opencv.utils.Converters;
025
026// C++: class Imgproc
027
028public class Imgproc {
029
030    private static final int
031            IPL_BORDER_CONSTANT = 0,
032            IPL_BORDER_REPLICATE = 1,
033            IPL_BORDER_REFLECT = 2,
034            IPL_BORDER_WRAP = 3,
035            IPL_BORDER_REFLECT_101 = 4,
036            IPL_BORDER_TRANSPARENT = 5,
037            CV_INTER_NN = 0,
038            CV_INTER_LINEAR = 1,
039            CV_INTER_CUBIC = 2,
040            CV_INTER_AREA = 3,
041            CV_INTER_LANCZOS4 = 4,
042            CV_MOP_ERODE = 0,
043            CV_MOP_DILATE = 1,
044            CV_MOP_OPEN = 2,
045            CV_MOP_CLOSE = 3,
046            CV_MOP_GRADIENT = 4,
047            CV_MOP_TOPHAT = 5,
048            CV_MOP_BLACKHAT = 6,
049            CV_RETR_EXTERNAL = 0,
050            CV_RETR_LIST = 1,
051            CV_RETR_CCOMP = 2,
052            CV_RETR_TREE = 3,
053            CV_RETR_FLOODFILL = 4,
054            CV_CHAIN_APPROX_NONE = 1,
055            CV_CHAIN_APPROX_SIMPLE = 2,
056            CV_CHAIN_APPROX_TC89_L1 = 3,
057            CV_CHAIN_APPROX_TC89_KCOS = 4,
058            CV_THRESH_BINARY = 0,
059            CV_THRESH_BINARY_INV = 1,
060            CV_THRESH_TRUNC = 2,
061            CV_THRESH_TOZERO = 3,
062            CV_THRESH_TOZERO_INV = 4,
063            CV_THRESH_MASK = 7,
064            CV_THRESH_OTSU = 8,
065            CV_THRESH_TRIANGLE = 16;
066
067
068    // C++: enum <unnamed>
069    public static final int
070            CV_GAUSSIAN_5x5 = 7,
071            CV_SCHARR = -1,
072            CV_MAX_SOBEL_KSIZE = 7,
073            CV_RGBA2mRGBA = 125,
074            CV_mRGBA2RGBA = 126,
075            CV_WARP_FILL_OUTLIERS = 8,
076            CV_WARP_INVERSE_MAP = 16,
077            CV_CHAIN_CODE = 0,
078            CV_LINK_RUNS = 5,
079            CV_POLY_APPROX_DP = 0,
080            CV_CONTOURS_MATCH_I1 = 1,
081            CV_CONTOURS_MATCH_I2 = 2,
082            CV_CONTOURS_MATCH_I3 = 3,
083            CV_CLOCKWISE = 1,
084            CV_COUNTER_CLOCKWISE = 2,
085            CV_COMP_CORREL = 0,
086            CV_COMP_CHISQR = 1,
087            CV_COMP_INTERSECT = 2,
088            CV_COMP_BHATTACHARYYA = 3,
089            CV_COMP_HELLINGER = CV_COMP_BHATTACHARYYA,
090            CV_COMP_CHISQR_ALT = 4,
091            CV_COMP_KL_DIV = 5,
092            CV_DIST_MASK_3 = 3,
093            CV_DIST_MASK_5 = 5,
094            CV_DIST_MASK_PRECISE = 0,
095            CV_DIST_LABEL_CCOMP = 0,
096            CV_DIST_LABEL_PIXEL = 1,
097            CV_DIST_USER = -1,
098            CV_DIST_L1 = 1,
099            CV_DIST_L2 = 2,
100            CV_DIST_C = 3,
101            CV_DIST_L12 = 4,
102            CV_DIST_FAIR = 5,
103            CV_DIST_WELSCH = 6,
104            CV_DIST_HUBER = 7,
105            CV_CANNY_L2_GRADIENT = (1 << 31),
106            CV_HOUGH_STANDARD = 0,
107            CV_HOUGH_PROBABILISTIC = 1,
108            CV_HOUGH_MULTI_SCALE = 2,
109            CV_HOUGH_GRADIENT = 3;
110
111
112    // C++: enum MorphShapes_c (MorphShapes_c)
113    public static final int
114            CV_SHAPE_RECT = 0,
115            CV_SHAPE_CROSS = 1,
116            CV_SHAPE_ELLIPSE = 2,
117            CV_SHAPE_CUSTOM = 100;
118
119
120    // C++: enum SmoothMethod_c (SmoothMethod_c)
121    public static final int
122            CV_BLUR_NO_SCALE = 0,
123            CV_BLUR = 1,
124            CV_GAUSSIAN = 2,
125            CV_MEDIAN = 3,
126            CV_BILATERAL = 4;
127
128
129    // C++: enum AdaptiveThresholdTypes (cv.AdaptiveThresholdTypes)
130    public static final int
131            ADAPTIVE_THRESH_MEAN_C = 0,
132            ADAPTIVE_THRESH_GAUSSIAN_C = 1;
133
134
135    // C++: enum ColorConversionCodes (cv.ColorConversionCodes)
136    public static final int
137            COLOR_BGR2BGRA = 0,
138            COLOR_RGB2RGBA = COLOR_BGR2BGRA,
139            COLOR_BGRA2BGR = 1,
140            COLOR_RGBA2RGB = COLOR_BGRA2BGR,
141            COLOR_BGR2RGBA = 2,
142            COLOR_RGB2BGRA = COLOR_BGR2RGBA,
143            COLOR_RGBA2BGR = 3,
144            COLOR_BGRA2RGB = COLOR_RGBA2BGR,
145            COLOR_BGR2RGB = 4,
146            COLOR_RGB2BGR = COLOR_BGR2RGB,
147            COLOR_BGRA2RGBA = 5,
148            COLOR_RGBA2BGRA = COLOR_BGRA2RGBA,
149            COLOR_BGR2GRAY = 6,
150            COLOR_RGB2GRAY = 7,
151            COLOR_GRAY2BGR = 8,
152            COLOR_GRAY2RGB = COLOR_GRAY2BGR,
153            COLOR_GRAY2BGRA = 9,
154            COLOR_GRAY2RGBA = COLOR_GRAY2BGRA,
155            COLOR_BGRA2GRAY = 10,
156            COLOR_RGBA2GRAY = 11,
157            COLOR_BGR2BGR565 = 12,
158            COLOR_RGB2BGR565 = 13,
159            COLOR_BGR5652BGR = 14,
160            COLOR_BGR5652RGB = 15,
161            COLOR_BGRA2BGR565 = 16,
162            COLOR_RGBA2BGR565 = 17,
163            COLOR_BGR5652BGRA = 18,
164            COLOR_BGR5652RGBA = 19,
165            COLOR_GRAY2BGR565 = 20,
166            COLOR_BGR5652GRAY = 21,
167            COLOR_BGR2BGR555 = 22,
168            COLOR_RGB2BGR555 = 23,
169            COLOR_BGR5552BGR = 24,
170            COLOR_BGR5552RGB = 25,
171            COLOR_BGRA2BGR555 = 26,
172            COLOR_RGBA2BGR555 = 27,
173            COLOR_BGR5552BGRA = 28,
174            COLOR_BGR5552RGBA = 29,
175            COLOR_GRAY2BGR555 = 30,
176            COLOR_BGR5552GRAY = 31,
177            COLOR_BGR2XYZ = 32,
178            COLOR_RGB2XYZ = 33,
179            COLOR_XYZ2BGR = 34,
180            COLOR_XYZ2RGB = 35,
181            COLOR_BGR2YCrCb = 36,
182            COLOR_RGB2YCrCb = 37,
183            COLOR_YCrCb2BGR = 38,
184            COLOR_YCrCb2RGB = 39,
185            COLOR_BGR2HSV = 40,
186            COLOR_RGB2HSV = 41,
187            COLOR_BGR2Lab = 44,
188            COLOR_RGB2Lab = 45,
189            COLOR_BGR2Luv = 50,
190            COLOR_RGB2Luv = 51,
191            COLOR_BGR2HLS = 52,
192            COLOR_RGB2HLS = 53,
193            COLOR_HSV2BGR = 54,
194            COLOR_HSV2RGB = 55,
195            COLOR_Lab2BGR = 56,
196            COLOR_Lab2RGB = 57,
197            COLOR_Luv2BGR = 58,
198            COLOR_Luv2RGB = 59,
199            COLOR_HLS2BGR = 60,
200            COLOR_HLS2RGB = 61,
201            COLOR_BGR2HSV_FULL = 66,
202            COLOR_RGB2HSV_FULL = 67,
203            COLOR_BGR2HLS_FULL = 68,
204            COLOR_RGB2HLS_FULL = 69,
205            COLOR_HSV2BGR_FULL = 70,
206            COLOR_HSV2RGB_FULL = 71,
207            COLOR_HLS2BGR_FULL = 72,
208            COLOR_HLS2RGB_FULL = 73,
209            COLOR_LBGR2Lab = 74,
210            COLOR_LRGB2Lab = 75,
211            COLOR_LBGR2Luv = 76,
212            COLOR_LRGB2Luv = 77,
213            COLOR_Lab2LBGR = 78,
214            COLOR_Lab2LRGB = 79,
215            COLOR_Luv2LBGR = 80,
216            COLOR_Luv2LRGB = 81,
217            COLOR_BGR2YUV = 82,
218            COLOR_RGB2YUV = 83,
219            COLOR_YUV2BGR = 84,
220            COLOR_YUV2RGB = 85,
221            COLOR_YUV2RGB_NV12 = 90,
222            COLOR_YUV2BGR_NV12 = 91,
223            COLOR_YUV2RGB_NV21 = 92,
224            COLOR_YUV2BGR_NV21 = 93,
225            COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
226            COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
227            COLOR_YUV2RGBA_NV12 = 94,
228            COLOR_YUV2BGRA_NV12 = 95,
229            COLOR_YUV2RGBA_NV21 = 96,
230            COLOR_YUV2BGRA_NV21 = 97,
231            COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
232            COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
233            COLOR_YUV2RGB_YV12 = 98,
234            COLOR_YUV2BGR_YV12 = 99,
235            COLOR_YUV2RGB_IYUV = 100,
236            COLOR_YUV2BGR_IYUV = 101,
237            COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
238            COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
239            COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
240            COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
241            COLOR_YUV2RGBA_YV12 = 102,
242            COLOR_YUV2BGRA_YV12 = 103,
243            COLOR_YUV2RGBA_IYUV = 104,
244            COLOR_YUV2BGRA_IYUV = 105,
245            COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
246            COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
247            COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
248            COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
249            COLOR_YUV2GRAY_420 = 106,
250            COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
251            COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
252            COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
253            COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
254            COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
255            COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
256            COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
257            COLOR_YUV2RGB_UYVY = 107,
258            COLOR_YUV2BGR_UYVY = 108,
259            COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
260            COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
261            COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
262            COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
263            COLOR_YUV2RGBA_UYVY = 111,
264            COLOR_YUV2BGRA_UYVY = 112,
265            COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
266            COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
267            COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
268            COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
269            COLOR_YUV2RGB_YUY2 = 115,
270            COLOR_YUV2BGR_YUY2 = 116,
271            COLOR_YUV2RGB_YVYU = 117,
272            COLOR_YUV2BGR_YVYU = 118,
273            COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
274            COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
275            COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
276            COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
277            COLOR_YUV2RGBA_YUY2 = 119,
278            COLOR_YUV2BGRA_YUY2 = 120,
279            COLOR_YUV2RGBA_YVYU = 121,
280            COLOR_YUV2BGRA_YVYU = 122,
281            COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
282            COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
283            COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
284            COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
285            COLOR_YUV2GRAY_UYVY = 123,
286            COLOR_YUV2GRAY_YUY2 = 124,
287            COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
288            COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
289            COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
290            COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
291            COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
292            COLOR_RGBA2mRGBA = 125,
293            COLOR_mRGBA2RGBA = 126,
294            COLOR_RGB2YUV_I420 = 127,
295            COLOR_BGR2YUV_I420 = 128,
296            COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
297            COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
298            COLOR_RGBA2YUV_I420 = 129,
299            COLOR_BGRA2YUV_I420 = 130,
300            COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
301            COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
302            COLOR_RGB2YUV_YV12 = 131,
303            COLOR_BGR2YUV_YV12 = 132,
304            COLOR_RGBA2YUV_YV12 = 133,
305            COLOR_BGRA2YUV_YV12 = 134,
306            COLOR_BayerBG2BGR = 46,
307            COLOR_BayerGB2BGR = 47,
308            COLOR_BayerRG2BGR = 48,
309            COLOR_BayerGR2BGR = 49,
310            COLOR_BayerRGGB2BGR = COLOR_BayerBG2BGR,
311            COLOR_BayerGRBG2BGR = COLOR_BayerGB2BGR,
312            COLOR_BayerBGGR2BGR = COLOR_BayerRG2BGR,
313            COLOR_BayerGBRG2BGR = COLOR_BayerGR2BGR,
314            COLOR_BayerRGGB2RGB = COLOR_BayerBGGR2BGR,
315            COLOR_BayerGRBG2RGB = COLOR_BayerGBRG2BGR,
316            COLOR_BayerBGGR2RGB = COLOR_BayerRGGB2BGR,
317            COLOR_BayerGBRG2RGB = COLOR_BayerGRBG2BGR,
318            COLOR_BayerBG2RGB = COLOR_BayerRG2BGR,
319            COLOR_BayerGB2RGB = COLOR_BayerGR2BGR,
320            COLOR_BayerRG2RGB = COLOR_BayerBG2BGR,
321            COLOR_BayerGR2RGB = COLOR_BayerGB2BGR,
322            COLOR_BayerBG2GRAY = 86,
323            COLOR_BayerGB2GRAY = 87,
324            COLOR_BayerRG2GRAY = 88,
325            COLOR_BayerGR2GRAY = 89,
326            COLOR_BayerRGGB2GRAY = COLOR_BayerBG2GRAY,
327            COLOR_BayerGRBG2GRAY = COLOR_BayerGB2GRAY,
328            COLOR_BayerBGGR2GRAY = COLOR_BayerRG2GRAY,
329            COLOR_BayerGBRG2GRAY = COLOR_BayerGR2GRAY,
330            COLOR_BayerBG2BGR_VNG = 62,
331            COLOR_BayerGB2BGR_VNG = 63,
332            COLOR_BayerRG2BGR_VNG = 64,
333            COLOR_BayerGR2BGR_VNG = 65,
334            COLOR_BayerRGGB2BGR_VNG = COLOR_BayerBG2BGR_VNG,
335            COLOR_BayerGRBG2BGR_VNG = COLOR_BayerGB2BGR_VNG,
336            COLOR_BayerBGGR2BGR_VNG = COLOR_BayerRG2BGR_VNG,
337            COLOR_BayerGBRG2BGR_VNG = COLOR_BayerGR2BGR_VNG,
338            COLOR_BayerRGGB2RGB_VNG = COLOR_BayerBGGR2BGR_VNG,
339            COLOR_BayerGRBG2RGB_VNG = COLOR_BayerGBRG2BGR_VNG,
340            COLOR_BayerBGGR2RGB_VNG = COLOR_BayerRGGB2BGR_VNG,
341            COLOR_BayerGBRG2RGB_VNG = COLOR_BayerGRBG2BGR_VNG,
342            COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG,
343            COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG,
344            COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG,
345            COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG,
346            COLOR_BayerBG2BGR_EA = 135,
347            COLOR_BayerGB2BGR_EA = 136,
348            COLOR_BayerRG2BGR_EA = 137,
349            COLOR_BayerGR2BGR_EA = 138,
350            COLOR_BayerRGGB2BGR_EA = COLOR_BayerBG2BGR_EA,
351            COLOR_BayerGRBG2BGR_EA = COLOR_BayerGB2BGR_EA,
352            COLOR_BayerBGGR2BGR_EA = COLOR_BayerRG2BGR_EA,
353            COLOR_BayerGBRG2BGR_EA = COLOR_BayerGR2BGR_EA,
354            COLOR_BayerRGGB2RGB_EA = COLOR_BayerBGGR2BGR_EA,
355            COLOR_BayerGRBG2RGB_EA = COLOR_BayerGBRG2BGR_EA,
356            COLOR_BayerBGGR2RGB_EA = COLOR_BayerRGGB2BGR_EA,
357            COLOR_BayerGBRG2RGB_EA = COLOR_BayerGRBG2BGR_EA,
358            COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA,
359            COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA,
360            COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA,
361            COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA,
362            COLOR_BayerBG2BGRA = 139,
363            COLOR_BayerGB2BGRA = 140,
364            COLOR_BayerRG2BGRA = 141,
365            COLOR_BayerGR2BGRA = 142,
366            COLOR_BayerRGGB2BGRA = COLOR_BayerBG2BGRA,
367            COLOR_BayerGRBG2BGRA = COLOR_BayerGB2BGRA,
368            COLOR_BayerBGGR2BGRA = COLOR_BayerRG2BGRA,
369            COLOR_BayerGBRG2BGRA = COLOR_BayerGR2BGRA,
370            COLOR_BayerRGGB2RGBA = COLOR_BayerBGGR2BGRA,
371            COLOR_BayerGRBG2RGBA = COLOR_BayerGBRG2BGRA,
372            COLOR_BayerBGGR2RGBA = COLOR_BayerRGGB2BGRA,
373            COLOR_BayerGBRG2RGBA = COLOR_BayerGRBG2BGRA,
374            COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA,
375            COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA,
376            COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA,
377            COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA,
378            COLOR_COLORCVT_MAX = 143;
379
380
381    // C++: enum ColormapTypes (cv.ColormapTypes)
382    public static final int
383            COLORMAP_AUTUMN = 0,
384            COLORMAP_BONE = 1,
385            COLORMAP_JET = 2,
386            COLORMAP_WINTER = 3,
387            COLORMAP_RAINBOW = 4,
388            COLORMAP_OCEAN = 5,
389            COLORMAP_SUMMER = 6,
390            COLORMAP_SPRING = 7,
391            COLORMAP_COOL = 8,
392            COLORMAP_HSV = 9,
393            COLORMAP_PINK = 10,
394            COLORMAP_HOT = 11,
395            COLORMAP_PARULA = 12,
396            COLORMAP_MAGMA = 13,
397            COLORMAP_INFERNO = 14,
398            COLORMAP_PLASMA = 15,
399            COLORMAP_VIRIDIS = 16,
400            COLORMAP_CIVIDIS = 17,
401            COLORMAP_TWILIGHT = 18,
402            COLORMAP_TWILIGHT_SHIFTED = 19,
403            COLORMAP_TURBO = 20,
404            COLORMAP_DEEPGREEN = 21;
405
406
407    // C++: enum ConnectedComponentsAlgorithmsTypes (cv.ConnectedComponentsAlgorithmsTypes)
408    public static final int
409            CCL_DEFAULT = -1,
410            CCL_WU = 0,
411            CCL_GRANA = 1,
412            CCL_BOLELLI = 2,
413            CCL_SAUF = 3,
414            CCL_BBDT = 4,
415            CCL_SPAGHETTI = 5;
416
417
418    // C++: enum ConnectedComponentsTypes (cv.ConnectedComponentsTypes)
419    public static final int
420            CC_STAT_LEFT = 0,
421            CC_STAT_TOP = 1,
422            CC_STAT_WIDTH = 2,
423            CC_STAT_HEIGHT = 3,
424            CC_STAT_AREA = 4,
425            CC_STAT_MAX = 5;
426
427
428    // C++: enum ContourApproximationModes (cv.ContourApproximationModes)
429    public static final int
430            CHAIN_APPROX_NONE = 1,
431            CHAIN_APPROX_SIMPLE = 2,
432            CHAIN_APPROX_TC89_L1 = 3,
433            CHAIN_APPROX_TC89_KCOS = 4;
434
435
436    // C++: enum DistanceTransformLabelTypes (cv.DistanceTransformLabelTypes)
437    public static final int
438            DIST_LABEL_CCOMP = 0,
439            DIST_LABEL_PIXEL = 1;
440
441
442    // C++: enum DistanceTransformMasks (cv.DistanceTransformMasks)
443    public static final int
444            DIST_MASK_3 = 3,
445            DIST_MASK_5 = 5,
446            DIST_MASK_PRECISE = 0;
447
448
449    // C++: enum DistanceTypes (cv.DistanceTypes)
450    public static final int
451            DIST_USER = -1,
452            DIST_L1 = 1,
453            DIST_L2 = 2,
454            DIST_C = 3,
455            DIST_L12 = 4,
456            DIST_FAIR = 5,
457            DIST_WELSCH = 6,
458            DIST_HUBER = 7;
459
460
461    // C++: enum FloodFillFlags (cv.FloodFillFlags)
462    public static final int
463            FLOODFILL_FIXED_RANGE = 1 << 16,
464            FLOODFILL_MASK_ONLY = 1 << 17;
465
466
467    // C++: enum GrabCutClasses (cv.GrabCutClasses)
468    public static final int
469            GC_BGD = 0,
470            GC_FGD = 1,
471            GC_PR_BGD = 2,
472            GC_PR_FGD = 3;
473
474
475    // C++: enum GrabCutModes (cv.GrabCutModes)
476    public static final int
477            GC_INIT_WITH_RECT = 0,
478            GC_INIT_WITH_MASK = 1,
479            GC_EVAL = 2,
480            GC_EVAL_FREEZE_MODEL = 3;
481
482
483    // C++: enum HersheyFonts (cv.HersheyFonts)
484    public static final int
485            FONT_HERSHEY_SIMPLEX = 0,
486            FONT_HERSHEY_PLAIN = 1,
487            FONT_HERSHEY_DUPLEX = 2,
488            FONT_HERSHEY_COMPLEX = 3,
489            FONT_HERSHEY_TRIPLEX = 4,
490            FONT_HERSHEY_COMPLEX_SMALL = 5,
491            FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
492            FONT_HERSHEY_SCRIPT_COMPLEX = 7,
493            FONT_ITALIC = 16;
494
495
496    // C++: enum HistCompMethods (cv.HistCompMethods)
497    public static final int
498            HISTCMP_CORREL = 0,
499            HISTCMP_CHISQR = 1,
500            HISTCMP_INTERSECT = 2,
501            HISTCMP_BHATTACHARYYA = 3,
502            HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA,
503            HISTCMP_CHISQR_ALT = 4,
504            HISTCMP_KL_DIV = 5;
505
506
507    // C++: enum HoughModes (cv.HoughModes)
508    public static final int
509            HOUGH_STANDARD = 0,
510            HOUGH_PROBABILISTIC = 1,
511            HOUGH_MULTI_SCALE = 2,
512            HOUGH_GRADIENT = 3,
513            HOUGH_GRADIENT_ALT = 4;
514
515
516    // C++: enum InterpolationFlags (cv.InterpolationFlags)
517    public static final int
518            INTER_NEAREST = 0,
519            INTER_LINEAR = 1,
520            INTER_CUBIC = 2,
521            INTER_AREA = 3,
522            INTER_LANCZOS4 = 4,
523            INTER_LINEAR_EXACT = 5,
524            INTER_NEAREST_EXACT = 6,
525            INTER_MAX = 7,
526            WARP_FILL_OUTLIERS = 8,
527            WARP_INVERSE_MAP = 16;
528
529
530    // C++: enum InterpolationMasks (cv.InterpolationMasks)
531    public static final int
532            INTER_BITS = 5,
533            INTER_BITS2 = INTER_BITS * 2,
534            INTER_TAB_SIZE = 1 << INTER_BITS,
535            INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE;
536
537
538    // C++: enum LineSegmentDetectorModes (cv.LineSegmentDetectorModes)
539    public static final int
540            LSD_REFINE_NONE = 0,
541            LSD_REFINE_STD = 1,
542            LSD_REFINE_ADV = 2;
543
544
545    // C++: enum LineTypes (cv.LineTypes)
546    public static final int
547            FILLED = -1,
548            LINE_4 = 4,
549            LINE_8 = 8,
550            LINE_AA = 16;
551
552
553    // C++: enum MarkerTypes (cv.MarkerTypes)
554    public static final int
555            MARKER_CROSS = 0,
556            MARKER_TILTED_CROSS = 1,
557            MARKER_STAR = 2,
558            MARKER_DIAMOND = 3,
559            MARKER_SQUARE = 4,
560            MARKER_TRIANGLE_UP = 5,
561            MARKER_TRIANGLE_DOWN = 6;
562
563
564    // C++: enum MorphShapes (cv.MorphShapes)
565    public static final int
566            MORPH_RECT = 0,
567            MORPH_CROSS = 1,
568            MORPH_ELLIPSE = 2;
569
570
571    // C++: enum MorphTypes (cv.MorphTypes)
572    public static final int
573            MORPH_ERODE = 0,
574            MORPH_DILATE = 1,
575            MORPH_OPEN = 2,
576            MORPH_CLOSE = 3,
577            MORPH_GRADIENT = 4,
578            MORPH_TOPHAT = 5,
579            MORPH_BLACKHAT = 6,
580            MORPH_HITMISS = 7;
581
582
583    // C++: enum RectanglesIntersectTypes (cv.RectanglesIntersectTypes)
584    public static final int
585            INTERSECT_NONE = 0,
586            INTERSECT_PARTIAL = 1,
587            INTERSECT_FULL = 2;
588
589
590    // C++: enum RetrievalModes (cv.RetrievalModes)
591    public static final int
592            RETR_EXTERNAL = 0,
593            RETR_LIST = 1,
594            RETR_CCOMP = 2,
595            RETR_TREE = 3,
596            RETR_FLOODFILL = 4;
597
598
599    // C++: enum ShapeMatchModes (cv.ShapeMatchModes)
600    public static final int
601            CONTOURS_MATCH_I1 = 1,
602            CONTOURS_MATCH_I2 = 2,
603            CONTOURS_MATCH_I3 = 3;
604
605
606    // C++: enum SpecialFilter (cv.SpecialFilter)
607    public static final int
608            FILTER_SCHARR = -1;
609
610
611    // C++: enum TemplateMatchModes (cv.TemplateMatchModes)
612    public static final int
613            TM_SQDIFF = 0,
614            TM_SQDIFF_NORMED = 1,
615            TM_CCORR = 2,
616            TM_CCORR_NORMED = 3,
617            TM_CCOEFF = 4,
618            TM_CCOEFF_NORMED = 5;
619
620
621    // C++: enum ThresholdTypes (cv.ThresholdTypes)
622    public static final int
623            THRESH_BINARY = 0,
624            THRESH_BINARY_INV = 1,
625            THRESH_TRUNC = 2,
626            THRESH_TOZERO = 3,
627            THRESH_TOZERO_INV = 4,
628            THRESH_MASK = 7,
629            THRESH_OTSU = 8,
630            THRESH_TRIANGLE = 16;
631
632
633    // C++: enum WarpPolarMode (cv.WarpPolarMode)
634    public static final int
635            WARP_POLAR_LINEAR = 0,
636            WARP_POLAR_LOG = 256;
637
638
639    //
640    // C++:  Ptr_LineSegmentDetector cv::createLineSegmentDetector(int refine = LSD_REFINE_STD, double scale = 0.8, double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5, double log_eps = 0, double density_th = 0.7, int n_bins = 1024)
641    //
642
643    /**
644     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
645     *
646     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
648     *
649     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
650     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
651     * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
652     * @param quant Bound to the quantization error on the gradient norm.
653     * @param ang_th Gradient angle tolerance in degrees.
     * @param log_eps Detection threshold: -log10(NFA) &gt; log_eps. Used only when advanced refinement is chosen.
655     * @param density_th Minimal density of aligned region points in the enclosing rectangle.
656     * @param n_bins Number of bins in pseudo-ordering of gradient modulus.
657     * @return automatically generated
658     */
659    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th, int n_bins) {
660        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_0(refine, scale, sigma_scale, quant, ang_th, log_eps, density_th, n_bins));
661    }
662
663    /**
664     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
665     *
666     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
668     *
669     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
670     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
671     * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
672     * @param quant Bound to the quantization error on the gradient norm.
673     * @param ang_th Gradient angle tolerance in degrees.
     * @param log_eps Detection threshold: -log10(NFA) &gt; log_eps. Used only when advanced refinement is chosen.
675     * @param density_th Minimal density of aligned region points in the enclosing rectangle.
676     * @return automatically generated
677     */
678    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th) {
679        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_1(refine, scale, sigma_scale, quant, ang_th, log_eps, density_th));
680    }
681
682    /**
683     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
684     *
685     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
687     *
688     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
689     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
690     * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
691     * @param quant Bound to the quantization error on the gradient norm.
692     * @param ang_th Gradient angle tolerance in degrees.
     * @param log_eps Detection threshold: -log10(NFA) &gt; log_eps. Used only when advanced refinement is chosen.
694     * @return automatically generated
695     */
696    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps) {
697        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_2(refine, scale, sigma_scale, quant, ang_th, log_eps));
698    }
699
700    /**
701     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
702     *
703     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
705     *
706     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
707     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
708     * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
709     * @param quant Bound to the quantization error on the gradient norm.
710     * @param ang_th Gradient angle tolerance in degrees.
711     * @return automatically generated
712     */
713    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th) {
714        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_3(refine, scale, sigma_scale, quant, ang_th));
715    }
716
717    /**
718     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
719     *
720     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
722     *
723     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
724     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
725     * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
726     * @param quant Bound to the quantization error on the gradient norm.
727     * @return automatically generated
728     */
729    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant) {
730        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_4(refine, scale, sigma_scale, quant));
731    }
732
733    /**
734     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
735     *
736     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
738     *
739     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
740     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
741     * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.
742     * @return automatically generated
743     */
744    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale) {
745        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_5(refine, scale, sigma_scale));
746    }
747
748    /**
749     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
750     *
751     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
753     *
754     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
755     * @param scale The scale of the image that will be used to find the lines. Range (0..1].
756     * @return automatically generated
757     */
758    public static LineSegmentDetector createLineSegmentDetector(int refine, double scale) {
759        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_6(refine, scale));
760    }
761
762    /**
763     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
764     *
765     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
767     *
768     * @param refine The way found lines will be refined, see #LineSegmentDetectorModes
769     * @return automatically generated
770     */
771    public static LineSegmentDetector createLineSegmentDetector(int refine) {
772        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_7(refine));
773    }
774
775    /**
776     * Creates a smart pointer to a LineSegmentDetector object and initializes it.
777     *
778     * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
     * to edit those, so as to tailor it to their own application.
780     *
781     * @return automatically generated
782     */
783    public static LineSegmentDetector createLineSegmentDetector() {
784        return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_8());
785    }
786
787
788    //
789    // C++:  Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
790    //
791
792    /**
793     * Returns Gaussian filter coefficients.
794     *
795     * The function computes and returns the \(\texttt{ksize} \times 1\) matrix of Gaussian filter
796     * coefficients:
797     *
798     * \(G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\)
799     *
800     * where \(i=0..\texttt{ksize}-1\) and \(\alpha\) is the scale factor chosen so that \(\sum_i G_i=1\).
801     *
     * Two such generated kernels can be passed to sepFilter2D, which automatically recognizes
     * smoothing kernels (symmetrical kernels whose weights sum to 1) and handles them accordingly.
804     * You may also use the higher-level GaussianBlur.
805     * @param ksize Aperture size. It should be odd ( \(\texttt{ksize} \mod 2 = 1\) ) and positive.
806     * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as
807     * {@code sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8}.
     * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F.
809     * SEE:  sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur
810     * @return automatically generated
811     */
812    public static Mat getGaussianKernel(int ksize, double sigma, int ktype) {
813        return new Mat(getGaussianKernel_0(ksize, sigma, ktype));
814    }
815
816    /**
817     * Returns Gaussian filter coefficients.
818     *
819     * The function computes and returns the \(\texttt{ksize} \times 1\) matrix of Gaussian filter
820     * coefficients:
821     *
822     * \(G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\)
823     *
824     * where \(i=0..\texttt{ksize}-1\) and \(\alpha\) is the scale factor chosen so that \(\sum_i G_i=1\).
825     *
     * Two such generated kernels can be passed to sepFilter2D, which automatically recognizes
     * smoothing kernels (symmetrical kernels whose weights sum to 1) and handles them accordingly.
828     * You may also use the higher-level GaussianBlur.
829     * @param ksize Aperture size. It should be odd ( \(\texttt{ksize} \mod 2 = 1\) ) and positive.
830     * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as
831     * {@code sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8}.
832     * SEE:  sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur
833     * @return automatically generated
834     */
835    public static Mat getGaussianKernel(int ksize, double sigma) {
836        return new Mat(getGaussianKernel_1(ksize, sigma));
837    }
838
839
840    //
841    // C++:  void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
842    //
843
844    /**
845     * Returns filter coefficients for computing spatial image derivatives.
846     *
847     * The function computes and returns the filter coefficients for spatial image derivatives. When
848     * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel
     * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D.
850     *
     * @param kx Output matrix of row filter coefficients. It has the type ktype.
     * @param ky Output matrix of column filter coefficients. It has the type ktype.
     * @param dx Derivative order with respect to x.
     * @param dy Derivative order with respect to y.
855     * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
856     * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
857     * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are
858     * going to filter floating-point images, you are likely to use the normalized kernels. But if you
859     * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
860     * all the fractional bits, you may want to set normalize=false .
     * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F.
862     */
863    public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize, int ktype) {
864        getDerivKernels_0(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize, ktype);
865    }
866
867    /**
868     * Returns filter coefficients for computing spatial image derivatives.
869     *
870     * The function computes and returns the filter coefficients for spatial image derivatives. When
871     * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel
     * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D.
873     *
     * @param kx Output matrix of row filter coefficients. It has the type ktype.
     * @param ky Output matrix of column filter coefficients. It has the type ktype.
     * @param dx Derivative order with respect to x.
     * @param dy Derivative order with respect to y.
878     * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
879     * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.
880     * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are
881     * going to filter floating-point images, you are likely to use the normalized kernels. But if you
882     * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
883     * all the fractional bits, you may want to set normalize=false .
884     */
885    public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize) {
886        getDerivKernels_1(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize);
887    }
888
889    /**
890     * Returns filter coefficients for computing spatial image derivatives.
891     *
892     * The function computes and returns the filter coefficients for spatial image derivatives. When
893     * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel
     * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D.
895     *
     * @param kx Output matrix of row filter coefficients. It has the type ktype.
     * @param ky Output matrix of column filter coefficients. It has the type ktype.
     * @param dx Derivative order with respect to x.
     * @param dy Derivative order with respect to y.
900     * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.
901     * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are
902     * going to filter floating-point images, you are likely to use the normalized kernels. But if you
903     * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve
904     * all the fractional bits, you may want to set normalize=false .
905     */
906    public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize) {
907        getDerivKernels_2(kx.nativeObj, ky.nativeObj, dx, dy, ksize);
908    }
909
910
911    //
912    // C++:  Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
913    //
914
915    /**
916     * Returns Gabor filter coefficients.
917     *
     * For more details about Gabor filter equations and parameters, see: [Gabor
919     * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
920     *
921     * @param ksize Size of the filter returned.
     * @param sigma Standard deviation of the Gaussian envelope.
923     * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
924     * @param lambd Wavelength of the sinusoidal factor.
925     * @param gamma Spatial aspect ratio.
926     * @param psi Phase offset.
     * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F.
928     * @return automatically generated
929     */
930    public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi, int ktype) {
931        return new Mat(getGaborKernel_0(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi, ktype));
932    }
933
934    /**
935     * Returns Gabor filter coefficients.
936     *
     * For more details about Gabor filter equations and parameters, see: [Gabor
938     * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
939     *
940     * @param ksize Size of the filter returned.
     * @param sigma Standard deviation of the Gaussian envelope.
942     * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
943     * @param lambd Wavelength of the sinusoidal factor.
944     * @param gamma Spatial aspect ratio.
945     * @param psi Phase offset.
946     * @return automatically generated
947     */
948    public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi) {
949        return new Mat(getGaborKernel_1(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi));
950    }
951
952    /**
953     * Returns Gabor filter coefficients.
954     *
     * For more details about Gabor filter equations and parameters, see: [Gabor
956     * Filter](http://en.wikipedia.org/wiki/Gabor_filter).
957     *
958     * @param ksize Size of the filter returned.
     * @param sigma Standard deviation of the Gaussian envelope.
960     * @param theta Orientation of the normal to the parallel stripes of a Gabor function.
961     * @param lambd Wavelength of the sinusoidal factor.
962     * @param gamma Spatial aspect ratio.
963     * @return automatically generated
964     */
965    public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma) {
966        return new Mat(getGaborKernel_2(ksize.width, ksize.height, sigma, theta, lambd, gamma));
967    }
968
969
970    //
971    // C++:  Mat cv::getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1))
972    //
973
974    /**
975     * Returns a structuring element of the specified size and shape for morphological operations.
976     *
977     * The function constructs and returns the structuring element that can be further passed to #erode,
978     * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as
979     * the structuring element.
980     *
981     * @param shape Element shape that could be one of #MorphShapes
982     * @param ksize Size of the structuring element.
983     * @param anchor Anchor position within the element. The default value \((-1, -1)\) means that the
984     * anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor
985     * position. In other cases the anchor just regulates how much the result of the morphological
986     * operation is shifted.
987     * @return automatically generated
988     */
989    public static Mat getStructuringElement(int shape, Size ksize, Point anchor) {
990        return new Mat(getStructuringElement_0(shape, ksize.width, ksize.height, anchor.x, anchor.y));
991    }
992
993    /**
994     * Returns a structuring element of the specified size and shape for morphological operations.
995     *
996     * The function constructs and returns the structuring element that can be further passed to #erode,
997     * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as
998     * the structuring element.
999     *
1000     * @param shape Element shape that could be one of #MorphShapes
1001     * @param ksize Size of the structuring element.
     * The anchor is at the element center. Note that only the shape of a cross-shaped element depends on the anchor
1003     * position. In other cases the anchor just regulates how much the result of the morphological
1004     * operation is shifted.
1005     * @return automatically generated
1006     */
1007    public static Mat getStructuringElement(int shape, Size ksize) {
1008        return new Mat(getStructuringElement_1(shape, ksize.width, ksize.height));
1009    }
1010
1011
1012    //
1013    // C++:  void cv::medianBlur(Mat src, Mat& dst, int ksize)
1014    //
1015
1016    /**
1017     * Blurs an image using the median filter.
1018     *
1019     * The function smoothes an image using the median filter with the \(\texttt{ksize} \times
1020     * \texttt{ksize}\) aperture. Each channel of a multi-channel image is processed independently.
1021     * In-place operation is supported.
1022     *
1023     * <b>Note:</b> The median filter uses #BORDER_REPLICATE internally to cope with border pixels, see #BorderTypes
1024     *
     * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be
     * CV_8U, CV_16U, or CV_32F; for larger aperture sizes, it can only be CV_8U.
1027     * @param dst destination array of the same size and type as src.
1028     * @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
1029     * SEE:  bilateralFilter, blur, boxFilter, GaussianBlur
1030     */
1031    public static void medianBlur(Mat src, Mat dst, int ksize) {
1032        medianBlur_0(src.nativeObj, dst.nativeObj, ksize);
1033    }
1034
1035
1036    //
1037    // C++:  void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT)
1038    //
1039
1040    /**
1041     * Blurs an image using a Gaussian filter.
1042     *
1043     * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
1044     * supported.
1045     *
1046     * @param src input image; the image can have any number of channels, which are processed
1047     * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1048     * @param dst output image of the same size and type as src.
1049     * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
     * positive and odd. Or, they can be zeros, and then they are computed from sigma.
1051     * @param sigmaX Gaussian kernel standard deviation in X direction.
1052     * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
1053     * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
1054     * respectively (see #getGaussianKernel for details); to fully control the result regardless of
1055     * possible future modifications of all this semantics, it is recommended to specify all of ksize,
1056     * sigmaX, and sigmaY.
1057     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1058     *
1059     * SEE:  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
1060     */
1061    public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY, int borderType) {
1062        GaussianBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY, borderType);
1063    }
1064
1065    /**
1066     * Blurs an image using a Gaussian filter.
1067     *
1068     * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
1069     * supported.
1070     *
1071     * @param src input image; the image can have any number of channels, which are processed
1072     * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1073     * @param dst output image of the same size and type as src.
1074     * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
     * positive and odd. Or, they can be zeros, and then they are computed from sigma.
1076     * @param sigmaX Gaussian kernel standard deviation in X direction.
1077     * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
1078     * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
1079     * respectively (see #getGaussianKernel for details); to fully control the result regardless of
1080     * possible future modifications of all this semantics, it is recommended to specify all of ksize,
1081     * sigmaX, and sigmaY.
1082     *
1083     * SEE:  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
1084     */
1085    public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY) {
1086        GaussianBlur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY);
1087    }
1088
1089    /**
1090     * Blurs an image using a Gaussian filter.
1091     *
1092     * The function convolves the source image with the specified Gaussian kernel. In-place filtering is
1093     * supported.
1094     *
1095     * @param src input image; the image can have any number of channels, which are processed
1096     * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1097     * @param dst output image of the same size and type as src.
1098     * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
     * positive and odd. Or, they can be zeros, and then they are computed from sigma.
1100     * @param sigmaX Gaussian kernel standard deviation in X direction.
     * sigmaY is set equal to sigmaX; if both sigmas are zeros, they are computed from ksize.width and ksize.height,
1102     * respectively (see #getGaussianKernel for details); to fully control the result regardless of
1103     * possible future modifications of all this semantics, it is recommended to specify all of ksize,
1104     * sigmaX, and sigmaY.
1105     *
1106     * SEE:  sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
1107     */
1108    public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX) {
1109        GaussianBlur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX);
1110    }
1111
1112
1113    //
1114    // C++:  void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
1115    //
1116
1117    /**
1118     * Applies the bilateral filter to an image.
1119     *
1120     * The function applies bilateral filtering to the input image, as described in
1121     * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
1122     * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
1123     * very slow compared to most filters.
1124     *
1125     * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (&lt;
1126     * 10), the filter will not have much effect, whereas if they are large (&gt; 150), they will have a very
1127     * strong effect, making the image look "cartoonish".
1128     *
1129     * _Filter size_: Large filters (d &gt; 5) are very slow, so it is recommended to use d=5 for real-time
1130     * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
1131     *
     * This filter does not work in place.
1133     * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
     * @param dst Destination image of the same size and type as src.
1135     * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
1136     * it is computed from sigmaSpace.
1137     * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
1138     * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
1139     * in larger areas of semi-equal color.
1140     * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
1141     * farther pixels will influence each other as long as their colors are close enough (see sigmaColor
1142     * ). When d&gt;0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
1143     * proportional to sigmaSpace.
1144     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
1145     */
1146    public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType) {
1147        bilateralFilter_0(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace, borderType);
1148    }
1149
1150    /**
1151     * Applies the bilateral filter to an image.
1152     *
1153     * The function applies bilateral filtering to the input image, as described in
1154     * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
1155     * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
1156     * very slow compared to most filters.
1157     *
1158     * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (&lt;
1159     * 10), the filter will not have much effect, whereas if they are large (&gt; 150), they will have a very
1160     * strong effect, making the image look "cartoonish".
1161     *
1162     * _Filter size_: Large filters (d &gt; 5) are very slow, so it is recommended to use d=5 for real-time
1163     * applications, and perhaps d=9 for offline applications that need heavy noise filtering.
1164     *
     * This filter does not work in place.
1166     * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
     * @param dst Destination image of the same size and type as src.
1168     * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
1169     * it is computed from sigmaSpace.
1170     * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
1171     * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
1172     * in larger areas of semi-equal color.
1173     * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
1174     * farther pixels will influence each other as long as their colors are close enough (see sigmaColor
1175     * ). When d&gt;0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
1176     * proportional to sigmaSpace.
1177     */
1178    public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace) {
1179        bilateralFilter_1(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace);
1180    }
1181
1182
1183    //
1184    // C++:  void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT)
1185    //
1186
1187    /**
1188     * Blurs an image using the box filter.
1189     *
1190     * The function smooths an image using the kernel:
1191     *
1192     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1193     *
1194     * where
1195     *
1196     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1197     *
1198     * Unnormalized box filter is useful for computing various integral characteristics over each pixel
1199     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1200     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1201     *
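     * For example, a normalized 5x5 box blur that keeps the source depth could be sketched as
     * follows (assuming {@code src} is a loaded image):
     * <code>
     * Mat dst = new Mat();
     * Imgproc.boxFilter(src, dst, -1, new Size(5, 5)); // ddepth = -1: same depth as src
     * </code>
     *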
1202     * @param src input image.
1203     * @param dst output image of the same size and type as src.
1204     * @param ddepth the output image depth (-1 to use src.depth()).
1205     * @param ksize blurring kernel size.
1206     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1207     * center.
1208     * @param normalize flag, specifying whether the kernel is normalized by its area or not.
1209     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
1210     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1211     */
1212    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) {
1213        boxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType);
1214    }
1215
1216    /**
1217     * Blurs an image using the box filter.
1218     *
1219     * The function smooths an image using the kernel:
1220     *
1221     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1222     *
1223     * where
1224     *
1225     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1226     *
1227     * The unnormalized box filter is useful for computing various integral characteristics over each pixel
1228     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1229     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1230     *
1231     * @param src input image.
1232     * @param dst output image of the same size and type as src.
1233     * @param ddepth the output image depth (-1 to use src.depth()).
1234     * @param ksize blurring kernel size.
1235     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1236     * center.
1237     * @param normalize flag, specifying whether the kernel is normalized by its area or not.
1238     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1239     */
1240    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) {
1241        boxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize);
1242    }
1243
1244    /**
1245     * Blurs an image using the box filter.
1246     *
1247     * The function smooths an image using the kernel:
1248     *
1249     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1250     *
1251     * where
1252     *
1253     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1254     *
1255     * The unnormalized box filter is useful for computing various integral characteristics over each pixel
1256     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1257     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1258     *
1259     * @param src input image.
1260     * @param dst output image of the same size and type as src.
1261     * @param ddepth the output image depth (-1 to use src.depth()).
1262     * @param ksize blurring kernel size.
1263     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1264     * center.
1265     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1266     */
1267    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor) {
1268        boxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y);
1269    }
1270
1271    /**
1272     * Blurs an image using the box filter.
1273     *
1274     * The function smooths an image using the kernel:
1275     *
1276     * \(\texttt{K} =  \alpha \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1 \end{bmatrix}\)
1277     *
1278     * where
1279     *
1280     * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} &amp; \texttt{when } \texttt{normalize=true}  \\1 &amp; \texttt{otherwise}\end{cases}\)
1281     *
1282     * The unnormalized box filter is useful for computing various integral characteristics over each pixel
1283     * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
1284     * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.
1285     *
1286     * @param src input image.
1287     * @param dst output image of the same size and type as src.
1288     * @param ddepth the output image depth (-1 to use src.depth()).
1289     * @param ksize blurring kernel size.
1291     * SEE:  blur, bilateralFilter, GaussianBlur, medianBlur, integral
1292     */
1293    public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize) {
1294        boxFilter_3(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height);
1295    }
1296
1297
1298    //
1299    // C++:  void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, int borderType = BORDER_DEFAULT)
1300    //
1301
1302    /**
1303     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1304     *
1305     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1306     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1307     *
1308     * The unnormalized square box filter can be useful in computing local image statistics such as the local
1309     * variance and standard deviation around the neighborhood of a pixel.
1310     *
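     * As a sketch, the local variance around each pixel can be estimated from the normalized
     * sum of squares and the normalized sum ({@code Core} and {@code CvType} are from
     * org.opencv.core; {@code src} is assumed to be a loaded single-channel image):
     * <code>
     * Mat mean = new Mat(), sqMean = new Mat();
     * Imgproc.boxFilter(src, mean, CvType.CV_32F, new Size(5, 5));
     * Imgproc.sqrBoxFilter(src, sqMean, CvType.CV_32F, new Size(5, 5));
     * Mat variance = new Mat();
     * Core.subtract(sqMean, mean.mul(mean), variance); // Var = E[x^2] - (E[x])^2
     * </code>
     *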
1311     * @param src input image
1312     * @param dst output image of the same size and type as src
1313     * @param ddepth the output image depth (-1 to use src.depth())
1314     * @param ksize kernel size
1315     * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
1316     * center.
1317     * @param normalize flag, specifying whether the kernel is to be normalized by its area or not.
1318     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
1319     * SEE: boxFilter
1320     */
1321    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) {
1322        sqrBoxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType);
1323    }
1324
1325    /**
1326     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1327     *
1328     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1329     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1330     *
1331     * The unnormalized square box filter can be useful in computing local image statistics such as the local
1332     * variance and standard deviation around the neighborhood of a pixel.
1333     *
1334     * @param src input image
1335     * @param dst output image of the same size and type as src
1336     * @param ddepth the output image depth (-1 to use src.depth())
1337     * @param ksize kernel size
1338     * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
1339     * center.
1340     * @param normalize flag, specifying whether the kernel is to be normalized by its area or not.
1341     * SEE: boxFilter
1342     */
1343    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) {
1344        sqrBoxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize);
1345    }
1346
1347    /**
1348     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1349     *
1350     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1351     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1352     *
1353     * The unnormalized square box filter can be useful in computing local image statistics such as the local
1354     * variance and standard deviation around the neighborhood of a pixel.
1355     *
1356     * @param src input image
1357     * @param dst output image of the same size and type as src
1358     * @param ddepth the output image depth (-1 to use src.depth())
1359     * @param ksize kernel size
1360     * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
1361     * center.
1362     * SEE: boxFilter
1363     */
1364    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor) {
1365        sqrBoxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y);
1366    }
1367
1368    /**
1369     * Calculates the normalized sum of squares of the pixel values overlapping the filter.
1370     *
1371     * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring
1372     * pixel values which overlap the filter placed over the pixel \( (x, y) \).
1373     *
1374     * The unnormalized square box filter can be useful in computing local image statistics such as the local
1375     * variance and standard deviation around the neighborhood of a pixel.
1376     *
1377     * @param src input image
1378     * @param dst output image of the same size and type as src
1379     * @param ddepth the output image depth (-1 to use src.depth())
1380     * @param ksize kernel size
1382     * SEE: boxFilter
1383     */
1384    public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize) {
1385        sqrBoxFilter_3(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height);
1386    }
1387
1388
1389    //
1390    // C++:  void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
1391    //
1392
1393    /**
1394     * Blurs an image using the normalized box filter.
1395     *
1396     * The function smooths an image using the kernel:
1397     *
1398     * \(\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \end{bmatrix}\)
1399     *
1400     * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize,
1401     * anchor, true, borderType)`.
1402     *
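     * For example, a 3x3 normalized box blur (a sketch; {@code src} is assumed to be a loaded image):
     * <code>
     * Mat dst = new Mat();
     * Imgproc.blur(src, dst, new Size(3, 3));
     * </code>
     *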
1403     * @param src input image; it can have any number of channels, which are processed independently, but
1404     * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1405     * @param dst output image of the same size and type as src.
1406     * @param ksize blurring kernel size.
1407     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1408     * center.
1409     * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
1410     * SEE:  boxFilter, bilateralFilter, GaussianBlur, medianBlur
1411     */
1412    public static void blur(Mat src, Mat dst, Size ksize, Point anchor, int borderType) {
1413        blur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y, borderType);
1414    }
1415
1416    /**
1417     * Blurs an image using the normalized box filter.
1418     *
1419     * The function smooths an image using the kernel:
1420     *
1421     * \(\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \end{bmatrix}\)
1422     *
1423     * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize,
1424     * anchor, true, borderType)`.
1425     *
1426     * @param src input image; it can have any number of channels, which are processed independently, but
1427     * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1428     * @param dst output image of the same size and type as src.
1429     * @param ksize blurring kernel size.
1430     * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
1431     * center.
1432     * SEE:  boxFilter, bilateralFilter, GaussianBlur, medianBlur
1433     */
1434    public static void blur(Mat src, Mat dst, Size ksize, Point anchor) {
1435        blur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y);
1436    }
1437
1438    /**
1439     * Blurs an image using the normalized box filter.
1440     *
1441     * The function smooths an image using the kernel:
1442     *
1443     * \(\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \hdotsfor{6} \\ 1 &amp; 1 &amp; 1 &amp;  \cdots &amp; 1 &amp; 1  \\ \end{bmatrix}\)
1444     *
1445     * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize,
1446     * anchor, true, borderType)`.
1447     *
1448     * @param src input image; it can have any number of channels, which are processed independently, but
1449     * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
1450     * @param dst output image of the same size and type as src.
1451     * @param ksize blurring kernel size.
1453     * SEE:  boxFilter, bilateralFilter, GaussianBlur, medianBlur
1454     */
1455    public static void blur(Mat src, Mat dst, Size ksize) {
1456        blur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height);
1457    }
1458
1459
1460    //
1461    // C++:  void cv::stackBlur(Mat src, Mat& dst, Size ksize)
1462    //
1463
1464    /**
1465     * Blurs an image using stackBlur.
1466     *
1467     * The function applies stackBlur to an image.
1468     * stackBlur can generate results similar to Gaussian blur, and its running time does not grow with the kernel size.
1469     * It creates a kind of moving stack of colors while scanning through the image: it only has to add one new block of color to the right side
1470     * of the stack and remove the leftmost color. The remaining colors on the topmost layer of the stack are either incremented or decremented by one,
1471     * depending on whether they are on the right or on the left side of the stack. The only supported borderType is BORDER_REPLICATE.
1472     * The original algorithm was proposed by Mario Klingemann and is described at http://underdestruction.com/2004/02/25/stackblur-2004.
1473     *
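     * A usage sketch (both kernel dimensions must be positive and odd):
     * <code>
     * Mat dst = new Mat();
     * Imgproc.stackBlur(src, dst, new Size(101, 101)); // large kernels cost about the same as small ones
     * </code>
     *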
1474     * @param src input image. The number of channels can be arbitrary, but the depth should be one of
1475     * CV_8U, CV_16U, CV_16S or CV_32F.
1476     * @param dst output image of the same size and type as src.
1477     * @param ksize stack-blurring kernel size. The ksize.width and ksize.height can differ but they both must be
1478     * positive and odd.
1479     */
1480    public static void stackBlur(Mat src, Mat dst, Size ksize) {
1481        stackBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height);
1482    }
1483
1484
1485    //
1486    // C++:  void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
1487    //
1488
1489    /**
1490     * Convolves an image with the kernel.
1491     *
1492     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1493     * the aperture is partially outside the image, the function interpolates outlier pixel values
1494     * according to the specified border mode.
1495     *
1496     * The function actually computes correlation, not convolution:
1497     *
1498     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1499     *
1500     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1501     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1502     * anchor.y - 1)`.
1503     *
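     * As a sketch, a true convolution with a kernel whose original anchor was {@code (ax, ay)}
     * can be obtained this way ({@code Core.flip} is from org.opencv.core):
     * <code>
     * Mat flipped = new Mat();
     * Core.flip(kernel, flipped, -1); // flip around both axes
     * Point newAnchor = new Point(kernel.cols() - ax - 1, kernel.rows() - ay - 1);
     * Imgproc.filter2D(src, dst, -1, flipped, newAnchor);
     * </code>
     *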
1504     * The function uses the DFT-based algorithm for sufficiently large kernels (~{@code 11 x 11} or
1505     * larger) and the direct algorithm for small kernels.
1506     *
1507     * @param src input image.
1508     * @param dst output image of the same size and the same number of channels as src.
1509     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1510     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1511     * matrix; if you want to apply different kernels to different channels, split the image into
1512     * separate color planes using split and process them individually.
1513     * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
1514     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1515     * is at the kernel center.
1516     * @param delta optional value added to the filtered pixels before storing them in dst.
1517     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1518     * SEE:  sepFilter2D, dft, matchTemplate
1519     */
1520    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) {
1521        filter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta, borderType);
1522    }
1523
1524    /**
1525     * Convolves an image with the kernel.
1526     *
1527     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1528     * the aperture is partially outside the image, the function interpolates outlier pixel values
1529     * according to the specified border mode.
1530     *
1531     * The function actually computes correlation, not convolution:
1532     *
1533     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1534     *
1535     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1536     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1537     * anchor.y - 1)`.
1538     *
1539     * The function uses the DFT-based algorithm for sufficiently large kernels (~{@code 11 x 11} or
1540     * larger) and the direct algorithm for small kernels.
1541     *
1542     * @param src input image.
1543     * @param dst output image of the same size and the same number of channels as src.
1544     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1545     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1546     * matrix; if you want to apply different kernels to different channels, split the image into
1547     * separate color planes using split and process them individually.
1548     * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
1549     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1550     * is at the kernel center.
1551     * @param delta optional value added to the filtered pixels before storing them in dst.
1552     * SEE:  sepFilter2D, dft, matchTemplate
1553     */
1554    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta) {
1555        filter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta);
1556    }
1557
1558    /**
1559     * Convolves an image with the kernel.
1560     *
1561     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1562     * the aperture is partially outside the image, the function interpolates outlier pixel values
1563     * according to the specified border mode.
1564     *
1565     * The function actually computes correlation, not convolution:
1566     *
1567     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1568     *
1569     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1570     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1571     * anchor.y - 1)`.
1572     *
1573     * The function uses the DFT-based algorithm for sufficiently large kernels (~{@code 11 x 11} or
1574     * larger) and the direct algorithm for small kernels.
1575     *
1576     * @param src input image.
1577     * @param dst output image of the same size and the same number of channels as src.
1578     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1579     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1580     * matrix; if you want to apply different kernels to different channels, split the image into
1581     * separate color planes using split and process them individually.
1582     * @param anchor anchor of the kernel that indicates the relative position of a filtered point within
1583     * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
1584     * is at the kernel center.
1585     * SEE:  sepFilter2D, dft, matchTemplate
1586     */
1587    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor) {
1588        filter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y);
1589    }
1590
1591    /**
1592     * Convolves an image with the kernel.
1593     *
1594     * The function applies an arbitrary linear filter to an image. In-place operation is supported. When
1595     * the aperture is partially outside the image, the function interpolates outlier pixel values
1596     * according to the specified border mode.
1597     *
1598     * The function actually computes correlation, not convolution:
1599     *
1600     * \(\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' &lt; \texttt{kernel.cols}\\{0\leq y' &lt; \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\)
1601     *
1602     * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
1603     * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
1604     * anchor.y - 1)`.
1605     *
1606     * The function uses the DFT-based algorithm for sufficiently large kernels (~{@code 11 x 11} or
1607     * larger) and the direct algorithm for small kernels.
1608     *
1609     * @param src input image.
1610     * @param dst output image of the same size and the same number of channels as src.
1611     * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations"
1612     * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
1613     * matrix; if you want to apply different kernels to different channels, split the image into
1614     * separate color planes using split and process them individually.
1617     * SEE:  sepFilter2D, dft, matchTemplate
1618     */
1619    public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel) {
1620        filter2D_3(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj);
1621    }
1622
1623
1624    //
1625    // C++:  void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
1626    //
1627
1628    /**
1629     * Applies a separable linear filter to an image.
1630     *
1631     * The function applies a separable linear filter to the image. That is, first, every row of src is
1632     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1633     * kernel kernelY. The final result shifted by delta is stored in dst .
1634     *
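     * For instance, Gaussian smoothing can be expressed as a separable filter using
     * #getGaussianKernel for both 1D kernels (a sketch; {@code CvType} is from org.opencv.core):
     * <code>
     * Mat k = Imgproc.getGaussianKernel(5, 1.5, CvType.CV_64F);
     * Mat dst = new Mat();
     * Imgproc.sepFilter2D(src, dst, -1, k, k); // same 1D kernel for rows and columns
     * </code>
     *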
1635     * @param src Source image.
1636     * @param dst Destination image of the same size and the same number of channels as src .
1637     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1638     * @param kernelX Coefficients for filtering each row.
1639     * @param kernelY Coefficients for filtering each column.
1640     * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor
1641     * is at the kernel center.
1642     * @param delta Value added to the filtered results before storing them.
1643     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1644     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1645     */
1646    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) {
1647        sepFilter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta, borderType);
1648    }
1649
1650    /**
1651     * Applies a separable linear filter to an image.
1652     *
1653     * The function applies a separable linear filter to the image. That is, first, every row of src is
1654     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1655     * kernel kernelY. The final result shifted by delta is stored in dst .
1656     *
1657     * @param src Source image.
1658     * @param dst Destination image of the same size and the same number of channels as src .
1659     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1660     * @param kernelX Coefficients for filtering each row.
1661     * @param kernelY Coefficients for filtering each column.
1662     * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor
1663     * is at the kernel center.
1664     * @param delta Value added to the filtered results before storing them.
1665     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1666     */
1667    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta) {
1668        sepFilter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta);
1669    }
1670
1671    /**
1672     * Applies a separable linear filter to an image.
1673     *
1674     * The function applies a separable linear filter to the image. That is, first, every row of src is
1675     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1676     * kernel kernelY. The final result shifted by delta is stored in dst .
1677     *
1678     * @param src Source image.
1679     * @param dst Destination image of the same size and the same number of channels as src .
1680     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1681     * @param kernelX Coefficients for filtering each row.
1682     * @param kernelY Coefficients for filtering each column.
1683     * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor
1684     * is at the kernel center.
1685     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1686     */
1687    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor) {
1688        sepFilter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y);
1689    }
1690
1691    /**
1692     * Applies a separable linear filter to an image.
1693     *
1694     * The function applies a separable linear filter to the image. That is, first, every row of src is
1695     * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
1696     * kernel kernelY. The final result shifted by delta is stored in dst .
1697     *
1698     * @param src Source image.
1699     * @param dst Destination image of the same size and the same number of channels as src .
1700     * @param ddepth Destination image depth, see REF: filter_depths "combinations"
1701     * @param kernelX Coefficients for filtering each row.
1702     * @param kernelY Coefficients for filtering each column.
1704     * SEE:  filter2D, Sobel, GaussianBlur, boxFilter, blur
1705     */
1706    public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY) {
1707        sepFilter2D_3(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj);
1708    }
1709
1710
1711    //
1712    // C++:  void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
1713    //
1714
1715    /**
1716     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1717     *
1718     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1719     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1720     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1721     * or the second x- or y- derivatives.
1722     *
1723     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1724     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1725     *
1726     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1727     *
1728     * for the x-derivative, or transposed for the y-derivative.
1729     *
1730     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1731     *
1732     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1733     *
1734     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1735     * resistant to noise. Most often, the function is called with (xorder = 1, yorder = 0, ksize = 3)
1736     * or (xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1737     * case corresponds to a kernel of:
1738     *
1739     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1740     *
1741     * The second case corresponds to a kernel of:
1742     *
1743     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1744     *
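     * As a sketch, a gradient magnitude image can be built from the two first derivatives
     * ({@code Core} and {@code CvType} are from org.opencv.core):
     * <code>
     * Mat gx = new Mat(), gy = new Mat();
     * Imgproc.Sobel(src, gx, CvType.CV_32F, 1, 0, 3);
     * Imgproc.Sobel(src, gy, CvType.CV_32F, 0, 1, 3);
     * Mat mag = new Mat();
     * Core.magnitude(gx, gy, mag); // per-pixel sqrt(gx^2 + gy^2)
     * </code>
     *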
1745     * @param src input image.
1746     * @param dst output image of the same size and the same number of channels as src .
1747     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1748     *     8-bit input images it will result in truncated derivatives.
1749     * @param dx order of the derivative x.
1750     * @param dy order of the derivative y.
1751     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1752     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
1753     * applied (see #getDerivKernels for details).
1754     * @param delta optional delta value that is added to the results prior to storing them in dst.
1755     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
1756     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1757     */
1758    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) {
1759        Sobel_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta, borderType);
1760    }
1761
1762    /**
1763     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1764     *
1765     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1766     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1767     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1768     * or the second x- or y- derivatives.
1769     *
1770     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1771     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1772     *
1773     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1774     *
1775     * for the x-derivative, or transposed for the y-derivative.
1776     *
1777     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1778     *
1779     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1780     *
1781     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1782     * resistant to noise. Most often, the function is called with (xorder = 1, yorder = 0, ksize = 3)
1783     * or (xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1784     * case corresponds to a kernel of:
1785     *
1786     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1787     *
1788     * The second case corresponds to a kernel of:
1789     *
1790     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1791     *
1792     * @param src input image.
1793     * @param dst output image of the same size and the same number of channels as src .
1794     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1795     *     8-bit input images it will result in truncated derivatives.
1796     * @param dx order of the derivative x.
1797     * @param dy order of the derivative y.
1798     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1799     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
1800     * applied (see #getDerivKernels for details).
1801     * @param delta optional delta value that is added to the results prior to storing them in dst.
1802     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1803     */
1804    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta) {
1805        Sobel_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta);
1806    }
1807
1808    /**
1809     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1810     *
1811     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1812     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1813     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1814     * or the second x- or y- derivatives.
1815     *
1816     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1817     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1818     *
1819     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1820     *
1821     * for the x-derivative, or transposed for the y-derivative.
1822     *
1823     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1824     *
1825     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1826     *
1827     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1828     * resistant to noise. Most often, the function is called with (xorder = 1, yorder = 0, ksize = 3)
1829     * or (xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1830     * case corresponds to a kernel of:
1831     *
1832     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1833     *
1834     * The second case corresponds to a kernel of:
1835     *
1836     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1837     *
1838     * @param src input image.
1839     * @param dst output image of the same size and the same number of channels as src .
1840     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1841     *     8-bit input images it will result in truncated derivatives.
1842     * @param dx order of the derivative x.
1843     * @param dy order of the derivative y.
1844     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1845     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
1846     * applied (see #getDerivKernels for details).
1847     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1848     */
1849    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale) {
1850        Sobel_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale);
1851    }
1852
1853    /**
1854     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1855     *
1856     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1857     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1858     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1859     * or the second x- or y- derivatives.
1860     *
1861     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1862     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1863     *
1864     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1865     *
1866     * for the x-derivative, or transposed for the y-derivative.
1867     *
1868     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1869     *
1870     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1871     *
1872     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1873     * resistant to noise. Most often, the function is called with (xorder = 1, yorder = 0, ksize = 3)
1874     * or (xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1875     * case corresponds to a kernel of:
1876     *
1877     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1878     *
1879     * The second case corresponds to a kernel of:
1880     *
1881     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1882     *
1883     * @param src input image.
1884     * @param dst output image of the same size and the same number of channels as src .
1885     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1886     *     8-bit input images it will result in truncated derivatives.
1887     * @param dx order of the derivative x.
1888     * @param dy order of the derivative y.
1889     * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
1891     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1892     */
1893    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize) {
1894        Sobel_3(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize);
1895    }
1896
1897    /**
1898     * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
1899     *
1900     * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to
1901     * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\)
1902     * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first
1903     * or the second x- or y- derivatives.
1904     *
1905     * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr
1906     * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is
1907     *
1908     * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\)
1909     *
1910     * for the x-derivative, or transposed for the y-derivative.
1911     *
1912     * The function calculates an image derivative by convolving the image with the appropriate kernel:
1913     *
1914     * \(\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\)
1915     *
1916     * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
1917     * resistant to noise. Most often, the function is called with (xorder = 1, yorder = 0, ksize = 3)
1918     * or (xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
1919     * case corresponds to a kernel of:
1920     *
1921     * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\)
1922     *
1923     * The second case corresponds to a kernel of:
1924     *
1925     * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\)
1926     *
1927     * @param src input image.
1928     * @param dst output image of the same size and the same number of channels as src .
1929     * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of
1930     *     8-bit input images it will result in truncated derivatives.
1931     * @param dx order of the derivative x.
1932     * @param dy order of the derivative y.
1934     * SEE:  Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
1935     */
1936    public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy) {
1937        Sobel_4(src.nativeObj, dst.nativeObj, ddepth, dx, dy);
1938    }
1939
1940
1941    //
1942    // C++:  void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, int borderType = BORDER_DEFAULT)
1943    //
1944
1945    /**
1946     * Calculates the first-order image derivative in both x and y using a Sobel operator.
1947     *
1948     * Equivalent to calling:
1949     *
1950     * <code>
1951     * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
1952     * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
1953     * </code>
1954     *
1955     * @param src input image.
1956     * @param dx output image with first-order derivative in x.
1957     * @param dy output image with first-order derivative in y.
1958     * @param ksize size of Sobel kernel. It must be 3.
1959     * @param borderType pixel extrapolation method, see #BorderTypes.
1960     *                   Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
1961     *
1962     * SEE: Sobel
1963     */
1964    public static void spatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType) {
1965        spatialGradient_0(src.nativeObj, dx.nativeObj, dy.nativeObj, ksize, borderType);
1966    }
1967
1968    /**
1969     * Calculates the first-order image derivative in both x and y using a Sobel operator.
1970     *
1971     * Equivalent to calling:
1972     *
1973     * <code>
1974     * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
1975     * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
1976     * </code>
1977     *
1978     * @param src input image.
1979     * @param dx output image with first-order derivative in x.
1980     * @param dy output image with first-order derivative in y.
1981     * @param ksize size of Sobel kernel. It must be 3.
1983     *
1984     * SEE: Sobel
1985     */
1986    public static void spatialGradient(Mat src, Mat dx, Mat dy, int ksize) {
1987        spatialGradient_1(src.nativeObj, dx.nativeObj, dy.nativeObj, ksize);
1988    }
1989
1990    /**
1991     * Calculates the first-order image derivative in both x and y using a Sobel operator.
1992     *
1993     * Equivalent to calling:
1994     *
1995     * <code>
1996     * Sobel( src, dx, CV_16SC1, 1, 0, 3 );
1997     * Sobel( src, dy, CV_16SC1, 0, 1, 3 );
1998     * </code>
1999     *
2000     * @param src input image.
2001     * @param dx output image with first-order derivative in x.
2002     * @param dy output image with first-order derivative in y.
2004     *
2005     * SEE: Sobel
2006     */
2007    public static void spatialGradient(Mat src, Mat dx, Mat dy) {
2008        spatialGradient_2(src.nativeObj, dx.nativeObj, dy.nativeObj);
2009    }
2010
2011
2012    //
2013    // C++:  void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
2014    //
2015
2016    /**
2017     * Calculates the first x- or y- image derivative using the Scharr operator.
2018     *
2019     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2020     * call
2021     *
2022     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2023     *
2024     * is equivalent to
2025     *
2026     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2027     *
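     * For example, the x-derivative with the Scharr aperture (a sketch; {@code CvType} is from
     * org.opencv.core):
     * <code>
     * Mat gradX = new Mat();
     * Imgproc.Scharr(src, gradX, CvType.CV_32F, 1, 0);
     * </code>
     *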
2028     * @param src input image.
2029     * @param dst output image of the same size and the same number of channels as src.
2030     * @param ddepth output image depth, see REF: filter_depths "combinations"
2031     * @param dx order of the derivative x.
2032     * @param dy order of the derivative y.
2033     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
2034     * applied (see #getDerivKernels for details).
2035     * @param delta optional delta value that is added to the results prior to storing them in dst.
2036     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
2037     * SEE:  cartToPolar
2038     */
2039    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta, int borderType) {
2040        Scharr_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta, borderType);
2041    }
2042
2043    /**
2044     * Calculates the first x- or y- image derivative using the Scharr operator.
2045     *
2046     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2047     * call
2048     *
2049     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2050     *
2051     * is equivalent to
2052     *
2053     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2054     *
2055     * @param src input image.
2056     * @param dst output image of the same size and the same number of channels as src.
2057     * @param ddepth output image depth, see REF: filter_depths "combinations"
2058     * @param dx order of the derivative x.
2059     * @param dy order of the derivative y.
2060     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
2061     * applied (see #getDerivKernels for details).
2062     * @param delta optional delta value that is added to the results prior to storing them in dst.
2063     * SEE:  cartToPolar
2064     */
2065    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta) {
2066        Scharr_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta);
2067    }
2068
2069    /**
2070     * Calculates the first x- or y- image derivative using the Scharr operator.
2071     *
2072     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2073     * call
2074     *
2075     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2076     *
2077     * is equivalent to
2078     *
2079     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2080     *
2081     * @param src input image.
2082     * @param dst output image of the same size and the same number of channels as src.
2083     * @param ddepth output image depth, see REF: filter_depths "combinations"
2084     * @param dx order of the derivative x.
2085     * @param dy order of the derivative y.
2086     * @param scale optional scale factor for the computed derivative values; by default, no scaling is
2087     * applied (see #getDerivKernels for details).
2088     * SEE:  cartToPolar
2089     */
2090    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale) {
2091        Scharr_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale);
2092    }
2093
2094    /**
2095     * Calculates the first x- or y- image derivative using the Scharr operator.
2096     *
2097     * The function computes the first x- or y- spatial image derivative using the Scharr operator. The
2098     * call
2099     *
2100     * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\)
2101     *
2102     * is equivalent to
2103     *
2104     * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\)
2105     *
2106     * @param src input image.
2107     * @param dst output image of the same size and the same number of channels as src.
2108     * @param ddepth output image depth, see REF: filter_depths "combinations"
2109     * @param dx order of the derivative x.
2110     * @param dy order of the derivative y.
2112     * SEE:  cartToPolar
2113     */
2114    public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy) {
2115        Scharr_3(src.nativeObj, dst.nativeObj, ddepth, dx, dy);
2116    }
2117
2118
2119    //
2120    // C++:  void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
2121    //
2122
2123    /**
2124     * Calculates the Laplacian of an image.
2125     *
2126     * The function calculates the Laplacian of the source image by adding up the second x and y
2127     * derivatives calculated using the Sobel operator:
2128     *
2129     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2130     *
2131     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2132     * with the following \(3 \times 3\) aperture:
2133     *
2134     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2135     *
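     * A common edge-detection sketch (assuming {@code gray} is an 8-bit single-channel image;
     * a signed depth avoids truncating negative responses):
     * <code>
     * Mat lap = new Mat(), absLap = new Mat();
     * Imgproc.Laplacian(gray, lap, CvType.CV_16S, 3);
     * Core.convertScaleAbs(lap, absLap); // back to 8-bit for display
     * </code>
     *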
2136     * @param src Source image.
2137     * @param dst Destination image of the same size and the same number of channels as src .
2138     * @param ddepth Desired depth of the destination image, see REF: filter_depths "combinations".
2139     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2140     * details. The size must be positive and odd.
2141     * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
2142     * applied. See #getDerivKernels for details.
2143     * @param delta Optional delta value that is added to the results prior to storing them in dst .
2144     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
2145     * SEE:  Sobel, Scharr
2146     */
2147    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta, int borderType) {
2148        Laplacian_0(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta, borderType);
2149    }
2150
2151    /**
2152     * Calculates the Laplacian of an image.
2153     *
2154     * The function calculates the Laplacian of the source image by adding up the second x and y
2155     * derivatives calculated using the Sobel operator:
2156     *
2157     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2158     *
2159     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2160     * with the following \(3 \times 3\) aperture:
2161     *
2162     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2163     *
2164     * @param src Source image.
2165     * @param dst Destination image of the same size and the same number of channels as src .
2166     * @param ddepth Desired depth of the destination image, see REF: filter_depths "combinations".
2167     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2168     * details. The size must be positive and odd.
2169     * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
2170     * applied. See #getDerivKernels for details.
2171     * @param delta Optional delta value that is added to the results prior to storing them in dst .
2172     * SEE:  Sobel, Scharr
2173     */
2174    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta) {
2175        Laplacian_1(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta);
2176    }
2177
2178    /**
2179     * Calculates the Laplacian of an image.
2180     *
2181     * The function calculates the Laplacian of the source image by adding up the second x and y
2182     * derivatives calculated using the Sobel operator:
2183     *
2184     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2185     *
2186     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2187     * with the following \(3 \times 3\) aperture:
2188     *
2189     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2190     *
2191     * @param src Source image.
2192     * @param dst Destination image of the same size and the same number of channels as src .
2193     * @param ddepth Desired depth of the destination image, see REF: filter_depths "combinations".
2194     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2195     * details. The size must be positive and odd.
2196     * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
2197     * applied. See #getDerivKernels for details.
2198     * SEE:  Sobel, Scharr
2199     */
2200    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale) {
2201        Laplacian_2(src.nativeObj, dst.nativeObj, ddepth, ksize, scale);
2202    }
2203
2204    /**
2205     * Calculates the Laplacian of an image.
2206     *
2207     * The function calculates the Laplacian of the source image by adding up the second x and y
2208     * derivatives calculated using the Sobel operator:
2209     *
2210     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2211     *
2212     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2213     * with the following \(3 \times 3\) aperture:
2214     *
2215     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2216     *
2217     * @param src Source image.
2218     * @param dst Destination image of the same size and the same number of channels as src .
2219     * @param ddepth Desired depth of the destination image, see REF: filter_depths "combinations".
2220     * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
2221     * details. The size must be positive and odd.
2222     * By default, no scaling is applied and no delta is added.
2223     * SEE:  Sobel, Scharr
2224     */
2225    public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize) {
2226        Laplacian_3(src.nativeObj, dst.nativeObj, ddepth, ksize);
2227    }
2228
2229    /**
2230     * Calculates the Laplacian of an image.
2231     *
2232     * The function calculates the Laplacian of the source image by adding up the second x and y
2233     * derivatives calculated using the Sobel operator:
2234     *
2235     * \(\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\)
2236     *
2237     * This is done when {@code ksize &gt; 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image
2238     * with the following \(3 \times 3\) aperture:
2239     *
2240     * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\)
2241     *
2242     * @param src Source image.
2243     * @param dst Destination image of the same size and the same number of channels as src .
2244     * @param ddepth Desired depth of the destination image, see REF: filter_depths "combinations".
2245     * By default, {@code ksize == 1} is used (the \(3 \times 3\) aperture shown above), no
2246     * scaling is applied, and no delta is added.
2247     * SEE:  Sobel, Scharr
2248     */
2249    public static void Laplacian(Mat src, Mat dst, int ddepth) {
2250        Laplacian_4(src.nativeObj, dst.nativeObj, ddepth);
2251    }
2252
2253
2254    //
2255    // C++:  void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false)
2256    //
2257
2258    /**
2259     * Finds edges in an image using the Canny algorithm CITE: Canny86 .
2260     *
2261     * The function finds edges in the input image and marks them in the output map edges using the
2262     * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
2263     * largest value is used to find initial segments of strong edges. See
2264     * &lt;http://en.wikipedia.org/wiki/Canny_edge_detector&gt;
2265     *
2266     * @param image 8-bit input image.
2267     * @param edges output edge map; single-channel 8-bit image, which has the same size as image .
2268     * @param threshold1 first threshold for the hysteresis procedure.
2269     * @param threshold2 second threshold for the hysteresis procedure.
2270     * @param apertureSize aperture size for the Sobel operator.
2271     * @param L2gradient a flag, indicating whether a more accurate \(L_2\) norm
2272     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2273     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2274     * L2gradient=false ).
2275     */
2276    public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize, boolean L2gradient) {
2277        Canny_0(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize, L2gradient);
2278    }
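
    // Illustrative sketch, not part of the generated bindings: Canny edges on
    // a hypothetical 8-bit grayscale Mat. The 50/150 thresholds are assumed
    // values; a high:low ratio between 2:1 and 3:1 is a common starting point
    // for the hysteresis procedure.
    private static Mat exampleCanny(Mat gray8u) {
        Mat edges = new Mat();
        Canny(gray8u, edges, 50, 150, 3, false); // apertureSize=3, L1 norm
        return edges;
    }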
2279
2280    /**
2281     * Finds edges in an image using the Canny algorithm CITE: Canny86 .
2282     *
2283     * The function finds edges in the input image and marks them in the output map edges using the
2284     * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
2285     * largest value is used to find initial segments of strong edges. See
2286     * &lt;http://en.wikipedia.org/wiki/Canny_edge_detector&gt;
2287     *
2288     * @param image 8-bit input image.
2289     * @param edges output edge map; single-channel 8-bit image, which has the same size as image .
2290     * @param threshold1 first threshold for the hysteresis procedure.
2291     * @param threshold2 second threshold for the hysteresis procedure.
2292     * @param apertureSize aperture size for the Sobel operator.
2293     * By default, the \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is used to calculate the image gradient
2294     * magnitude ( L2gradient=false ).
2296     */
2297    public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize) {
2298        Canny_1(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize);
2299    }
2300
2301    /**
2302     * Finds edges in an image using the Canny algorithm CITE: Canny86 .
2303     *
2304     * The function finds edges in the input image and marks them in the output map edges using the
2305     * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
2306     * largest value is used to find initial segments of strong edges. See
2307     * &lt;http://en.wikipedia.org/wiki/Canny_edge_detector&gt;
2308     *
2309     * @param image 8-bit input image.
2310     * @param edges output edge map; single-channel 8-bit image, which has the same size as image .
2311     * @param threshold1 first threshold for the hysteresis procedure.
2312     * @param threshold2 second threshold for the hysteresis procedure.
2313     * By default, an aperture size of 3 is used for the Sobel operator, and the \(L_1\) norm
2314     * \(=|dI/dx|+|dI/dy|\) is used to calculate the image gradient magnitude ( L2gradient=false ).
2316     */
2317    public static void Canny(Mat image, Mat edges, double threshold1, double threshold2) {
2318        Canny_2(image.nativeObj, edges.nativeObj, threshold1, threshold2);
2319    }
2320
2321
2322    //
2323    // C++:  void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false)
2324    //
2325
2326    /**
2327     * \overload
2328     *
2329     * Finds edges in an image using the Canny algorithm with custom image gradient.
2330     *
2331     * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
2332     * @param dy 16-bit y derivative of input image (same type as dx).
2333     * @param edges output edge map; single-channel 8-bit image, which has the same size as image .
2334     * @param threshold1 first threshold for the hysteresis procedure.
2335     * @param threshold2 second threshold for the hysteresis procedure.
2336     * @param L2gradient a flag, indicating whether a more accurate \(L_2\) norm
2337     * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude (
2338     * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough (
2339     * L2gradient=false ).
2340     */
2341    public static void Canny(Mat dx, Mat dy, Mat edges, double threshold1, double threshold2, boolean L2gradient) {
2342        Canny_3(dx.nativeObj, dy.nativeObj, edges.nativeObj, threshold1, threshold2, L2gradient);
2343    }
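
    // Illustrative sketch, not part of the generated bindings: supplying a
    // precomputed gradient to the dx/dy overload above. Sobel with
    // ddepth=CV_16S produces the CV_16SC1 derivatives the overload expects;
    // the thresholds are assumed values.
    private static Mat exampleCannyWithGradient(Mat gray8u) {
        Mat dx = new Mat(), dy = new Mat();
        Sobel(gray8u, dx, org.opencv.core.CvType.CV_16S, 1, 0); // d/dx
        Sobel(gray8u, dy, org.opencv.core.CvType.CV_16S, 0, 1); // d/dy
        Mat edges = new Mat();
        Canny(dx, dy, edges, 50, 150, false);
        return edges;
    }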
2344
2345    /**
2346     * \overload
2347     *
2348     * Finds edges in an image using the Canny algorithm with custom image gradient.
2349     *
2350     * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).
2351     * @param dy 16-bit y derivative of input image (same type as dx).
2352     * @param edges output edge map; single-channel 8-bit image, which has the same size as image .
2353     * @param threshold1 first threshold for the hysteresis procedure.
2354     * @param threshold2 second threshold for the hysteresis procedure.
2355     * By default, the \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is used to calculate the image gradient
2356     * magnitude ( L2gradient=false ).
2358     */
2359    public static void Canny(Mat dx, Mat dy, Mat edges, double threshold1, double threshold2) {
2360        Canny_4(dx.nativeObj, dy.nativeObj, edges.nativeObj, threshold1, threshold2);
2361    }
2362
2363
2364    //
2365    // C++:  void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT)
2366    //
2367
2368    /**
2369     * Calculates the minimal eigenvalue of gradient matrices for corner detection.
2370     *
2371     * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
2372     * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms
2373     * of the formulae in the cornerEigenValsAndVecs description.
2374     *
2375     * @param src Input single-channel 8-bit or floating-point image.
2376     * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
2377     * src .
2378     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2379     * @param ksize Aperture parameter for the Sobel operator.
2380     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2381     */
2382    public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize, int borderType) {
2383        cornerMinEigenVal_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType);
2384    }
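
    // Illustrative sketch, not part of the generated bindings: the CV_32FC1
    // response map can be thresholded to pick corner candidates; blockSize=3
    // is an assumed value.
    private static Mat exampleCornerMinEigenVal(Mat gray8u) {
        Mat response = new Mat();
        cornerMinEigenVal(gray8u, response, 3);
        return response;
    }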
2385
2386    /**
2387     * Calculates the minimal eigenvalue of gradient matrices for corner detection.
2388     *
2389     * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
2390     * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms
2391     * of the formulae in the cornerEigenValsAndVecs description.
2392     *
2393     * @param src Input single-channel 8-bit or floating-point image.
2394     * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
2395     * src .
2396     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2397     * @param ksize Aperture parameter for the Sobel operator.
2398     */
2399    public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize) {
2400        cornerMinEigenVal_1(src.nativeObj, dst.nativeObj, blockSize, ksize);
2401    }
2402
2403    /**
2404     * Calculates the minimal eigenvalue of gradient matrices for corner detection.
2405     *
2406     * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
2407     * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms
2408     * of the formulae in the cornerEigenValsAndVecs description.
2409     *
2410     * @param src Input single-channel 8-bit or floating-point image.
2411     * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
2412     * src .
2413     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2414     */
2415    public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize) {
2416        cornerMinEigenVal_2(src.nativeObj, dst.nativeObj, blockSize);
2417    }
2418
2419
2420    //
2421    // C++:  void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT)
2422    //
2423
2424    /**
2425     * Harris corner detector.
2426     *
2427     * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
2428     * cornerEigenValsAndVecs , for each pixel \((x, y)\) it calculates a \(2\times2\) gradient covariance
2429     * matrix \(M^{(x,y)}\) over a \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood. Then, it
2430     * computes the following characteristic:
2431     *
2432     * \(\texttt{dst} (x,y) =  \mathrm{det} M^{(x,y)} - k  \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\)
2433     *
2434     * Corners in the image can be found as the local maxima of this response map.
2435     *
2436     * @param src Input single-channel 8-bit or floating-point image.
2437     * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same
2438     * size as src .
2439     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2440     * @param ksize Aperture parameter for the Sobel operator.
2441     * @param k Harris detector free parameter. See the formula above.
2442     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2443     */
2444    public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k, int borderType) {
2445        cornerHarris_0(src.nativeObj, dst.nativeObj, blockSize, ksize, k, borderType);
2446    }
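
    // Illustrative sketch, not part of the generated bindings: a Harris
    // response map. blockSize=2, ksize=3 and k=0.04 are commonly used values,
    // assumed here rather than prescribed by the API.
    private static Mat exampleCornerHarris(Mat gray8u) {
        Mat response = new Mat();
        cornerHarris(gray8u, response, 2, 3, 0.04);
        return response;
    }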
2447
2448    /**
2449     * Harris corner detector.
2450     *
2451     * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
2452     * cornerEigenValsAndVecs , for each pixel \((x, y)\) it calculates a \(2\times2\) gradient covariance
2453     * matrix \(M^{(x,y)}\) over a \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood. Then, it
2454     * computes the following characteristic:
2455     *
2456     * \(\texttt{dst} (x,y) =  \mathrm{det} M^{(x,y)} - k  \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\)
2457     *
2458     * Corners in the image can be found as the local maxima of this response map.
2459     *
2460     * @param src Input single-channel 8-bit or floating-point image.
2461     * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same
2462     * size as src .
2463     * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
2464     * @param ksize Aperture parameter for the Sobel operator.
2465     * @param k Harris detector free parameter. See the formula above.
2466     */
2467    public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k) {
2468        cornerHarris_1(src.nativeObj, dst.nativeObj, blockSize, ksize, k);
2469    }
2470
2471
2472    //
2473    // C++:  void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT)
2474    //
2475
2476    /**
2477     * Calculates eigenvalues and eigenvectors of image blocks for corner detection.
2478     *
2479     * For every pixel \(p\) , the function cornerEigenValsAndVecs considers a blockSize \(\times\) blockSize
2480     * neighborhood \(S(p)\) . It calculates the covariance matrix of derivatives over the neighborhood as:
2481     *
2482     * \(M =  \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 &amp;  \sum _{S(p)}dI/dx dI/dy  \\ \sum _{S(p)}dI/dx dI/dy &amp;  \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\)
2483     *
2484     * where the derivatives are computed using the Sobel operator.
2485     *
2486     * After that, it finds eigenvectors and eigenvalues of \(M\) and stores them in the destination image as
2487     * \((\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\) where
2488     *
2489     * <ul>
2490     *   <li>
2491     *    \(\lambda_1, \lambda_2\) are the non-sorted eigenvalues of \(M\)
2492     *   </li>
2493     *   <li>
2494     *    \(x_1, y_1\) are the eigenvectors corresponding to \(\lambda_1\)
2495     *   </li>
2496     *   <li>
2497     *    \(x_2, y_2\) are the eigenvectors corresponding to \(\lambda_2\)
2498     *   </li>
2499     * </ul>
2500     *
2501     * The output of the function can be used for robust edge or corner detection.
2502     *
2503     * @param src Input single-channel 8-bit or floating-point image.
2504     * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
2505     * @param blockSize Neighborhood size (see details below).
2506     * @param ksize Aperture parameter for the Sobel operator.
2507     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2508     *
2509     * SEE:  cornerMinEigenVal, cornerHarris, preCornerDetect
2510     */
2511    public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize, int borderType) {
2512        cornerEigenValsAndVecs_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType);
2513    }
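
    // Illustrative sketch, not part of the generated bindings: each pixel of
    // the CV_32FC(6) result packs (lambda1, lambda2, x1, y1, x2, y2) as
    // documented above; blockSize=3 and ksize=3 are assumed values.
    private static Mat exampleCornerEigenValsAndVecs(Mat gray8u) {
        Mat dst = new Mat();
        cornerEigenValsAndVecs(gray8u, dst, 3, 3);
        return dst;
    }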
2514
2515    /**
2516     * Calculates eigenvalues and eigenvectors of image blocks for corner detection.
2517     *
2518     * For every pixel \(p\) , the function cornerEigenValsAndVecs considers a blockSize \(\times\) blockSize
2519     * neighborhood \(S(p)\) . It calculates the covariance matrix of derivatives over the neighborhood as:
2520     *
2521     * \(M =  \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 &amp;  \sum _{S(p)}dI/dx dI/dy  \\ \sum _{S(p)}dI/dx dI/dy &amp;  \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\)
2522     *
2523     * where the derivatives are computed using the Sobel operator.
2524     *
2525     * After that, it finds eigenvectors and eigenvalues of \(M\) and stores them in the destination image as
2526     * \((\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\) where
2527     *
2528     * <ul>
2529     *   <li>
2530     *    \(\lambda_1, \lambda_2\) are the non-sorted eigenvalues of \(M\)
2531     *   </li>
2532     *   <li>
2533     *    \(x_1, y_1\) are the eigenvectors corresponding to \(\lambda_1\)
2534     *   </li>
2535     *   <li>
2536     *    \(x_2, y_2\) are the eigenvectors corresponding to \(\lambda_2\)
2537     *   </li>
2538     * </ul>
2539     *
2540     * The output of the function can be used for robust edge or corner detection.
2541     *
2542     * @param src Input single-channel 8-bit or floating-point image.
2543     * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
2544     * @param blockSize Neighborhood size (see details below).
2545     * @param ksize Aperture parameter for the Sobel operator.
2546     *
2547     * SEE:  cornerMinEigenVal, cornerHarris, preCornerDetect
2548     */
2549    public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize) {
2550        cornerEigenValsAndVecs_1(src.nativeObj, dst.nativeObj, blockSize, ksize);
2551    }
2552
2553
2554    //
2555    // C++:  void cv::preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT)
2556    //
2557
2558    /**
2559     * Calculates a feature map for corner detection.
2560     *
2561     * The function calculates the complex spatial derivative-based function of the source image
2562     *
2563     * \(\texttt{dst} = (D_x  \texttt{src} )^2  \cdot D_{yy}  \texttt{src} + (D_y  \texttt{src} )^2  \cdot D_{xx}  \texttt{src} - 2 D_x  \texttt{src} \cdot D_y  \texttt{src} \cdot D_{xy}  \texttt{src}\)
2564     *
2565     * where \(D_x\),\(D_y\) are the first image derivatives, \(D_{xx}\),\(D_{yy}\) are the second image
2566     * derivatives, and \(D_{xy}\) is the mixed derivative.
2567     *
2568     * The corners can be found as the local maxima of the function, as shown below:
2569     * <code>
2570     *     Mat corners, dilated_corners;
2571     *     preCornerDetect(image, corners, 3);
2572     *     // dilation with 3x3 rectangular structuring element
2573     *     dilate(corners, dilated_corners, Mat(), 1);
2574     *     Mat corner_mask = corners == dilated_corners;
2575     * </code>
2576     *
2577     * @param src Source single-channel 8-bit or floating-point image.
2578     * @param dst Output image that has the type CV_32F and the same size as src .
2579     * @param ksize Aperture size of the Sobel operator.
2580     * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
2581     */
2582    public static void preCornerDetect(Mat src, Mat dst, int ksize, int borderType) {
2583        preCornerDetect_0(src.nativeObj, dst.nativeObj, ksize, borderType);
2584    }
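
    // Illustrative Java analogue of the C++ snippet above, not part of the
    // generated bindings: corners are the local maxima of the feature map,
    // found by comparing the map against its dilated copy.
    private static Mat examplePreCornerDetect(Mat gray8u) {
        Mat corners = new Mat(), dilated = new Mat(), cornerMask = new Mat();
        preCornerDetect(gray8u, corners, 3);
        dilate(corners, dilated, new Mat()); // 3x3 rectangular structuring element
        org.opencv.core.Core.compare(corners, dilated, cornerMask,
                org.opencv.core.Core.CMP_EQ); // replaces C++ "corners == dilated_corners"
        return cornerMask;
    }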
2585
2586    /**
2587     * Calculates a feature map for corner detection.
2588     *
2589     * The function calculates the complex spatial derivative-based function of the source image
2590     *
2591     * \(\texttt{dst} = (D_x  \texttt{src} )^2  \cdot D_{yy}  \texttt{src} + (D_y  \texttt{src} )^2  \cdot D_{xx}  \texttt{src} - 2 D_x  \texttt{src} \cdot D_y  \texttt{src} \cdot D_{xy}  \texttt{src}\)
2592     *
2593     * where \(D_x\),\(D_y\) are the first image derivatives, \(D_{xx}\),\(D_{yy}\) are the second image
2594     * derivatives, and \(D_{xy}\) is the mixed derivative.
2595     *
2596     * The corners can be found as the local maxima of the function, as shown below:
2597     * <code>
2598     *     Mat corners, dilated_corners;
2599     *     preCornerDetect(image, corners, 3);
2600     *     // dilation with 3x3 rectangular structuring element
2601     *     dilate(corners, dilated_corners, Mat(), 1);
2602     *     Mat corner_mask = corners == dilated_corners;
2603     * </code>
2604     *
2605     * @param src Source single-channel 8-bit or floating-point image.
2606     * @param dst Output image that has the type CV_32F and the same size as src .
2607     * @param ksize Aperture size of the Sobel operator.
2608     */
2609    public static void preCornerDetect(Mat src, Mat dst, int ksize) {
2610        preCornerDetect_1(src.nativeObj, dst.nativeObj, ksize);
2611    }
2612
2613
2614    //
2615    // C++:  void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria)
2616    //
2617
2618    /**
2619     * Refines the corner locations.
2620     *
2621     * The function iterates to find the sub-pixel accurate location of corners or radial saddle
2622     * points as described in CITE: forstner1987fast, and as shown on the figure below.
2623     *
2624     * ![image](pics/cornersubpix.png)
2625     *
2626     * Sub-pixel accurate corner locator is based on the observation that every vector from the center \(q\)
2627     * to a point \(p\) located within a neighborhood of \(q\) is orthogonal to the image gradient at \(p\)
2628     * subject to image and measurement noise. Consider the expression:
2629     *
2630     * \(\epsilon _i = {DI_{p_i}}^T  \cdot (q - p_i)\)
2631     *
2632     * where \({DI_{p_i}}\) is an image gradient at one of the points \(p_i\) in a neighborhood of \(q\) . The
2633     * value of \(q\) is to be found so that \(\epsilon_i\) is minimized. A system of equations may be set up
2634     * with \(\epsilon_i\) set to zero:
2635     *
2636     * \(\sum _i(DI_{p_i}  \cdot {DI_{p_i}}^T) \cdot q -  \sum _i(DI_{p_i}  \cdot {DI_{p_i}}^T  \cdot p_i) = 0\)
2637     *
2638     * where the gradients are summed within a neighborhood ("search window") of \(q\) . Calling the first
2639     * gradient term \(G\) and the second gradient term \(b\) gives:
2640     *
2641     * \(q = G^{-1}  \cdot b\)
2642     *
2643     * The algorithm sets the center of the neighborhood window at this new center \(q\) and then iterates
2644     * until the center stays within a set threshold.
2645     *
2646     * @param image Input single-channel, 8-bit or float image.
2647     * @param corners Initial coordinates of the input corners and refined coordinates provided for
2648     * output.
2649     * @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) ,
2650     * then a \((5*2+1) \times (5*2+1) = 11 \times 11\) search window is used.
2651     * @param zeroZone Half of the size of the dead region in the middle of the search zone over which
2652     * the summation in the formula below is not done. It is used sometimes to avoid possible
2653     * singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no
2654     * such size.
2655     * @param criteria Criteria for termination of the iterative process of corner refinement. That is,
2656     * the process of corner position refinement stops either after criteria.maxCount iterations or when
2657     * the corner position moves by less than criteria.epsilon on some iteration.
2658     */
2659    public static void cornerSubPix(Mat image, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria) {
2660        cornerSubPix_0(image.nativeObj, corners.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon);
2661    }
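
    // Illustrative sketch, not part of the generated bindings: refining
    // initial corner estimates (already stored in corners2f) to sub-pixel
    // accuracy with an 11x11 search window. The termination criteria,
    // 30 iterations or 0.01 px movement, are assumed values.
    private static void exampleCornerSubPix(Mat gray8u, MatOfPoint2f corners2f) {
        TermCriteria criteria = new TermCriteria(
                TermCriteria.EPS + TermCriteria.MAX_ITER, 30, 0.01);
        cornerSubPix(gray8u, corners2f, new Size(5, 5), new Size(-1, -1), criteria);
    }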
2662
2663
2664    //
2665    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04)
2666    //
2667
2668    /**
2669     * Determines strong corners on an image.
2670     *
2671     * The function finds the most prominent corners in the image or in the specified image region, as
2672     * described in CITE: Shi94
2673     *
2674     * <ul>
2675     *   <li>
2676     *    The function calculates the corner quality measure at every source image pixel using
2677     *     #cornerMinEigenVal or #cornerHarris .
2678     *   </li>
2679     *   <li>
2680     *    The function performs non-maximum suppression (the local maxima in a *3 x 3* neighborhood are
2681     *     retained).
2682     *   </li>
2683     *   <li>
2684     *    The corners with the minimal eigenvalue less than
2685     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2686     *   </li>
2687     *   <li>
2688     *    The remaining corners are sorted by the quality measure in descending order.
2689     *   </li>
2690     *   <li>
2691     *    The function throws away each corner for which there is a stronger corner at a distance less than
2692     *     minDistance.
2693     *   </li>
2694     * </ul>
2695     *
2696     * The function can be used to initialize a point-based tracker of an object.
2697     *
2698     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2699     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2700     * with qualityLevel=B .
2701     *
2702     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2703     * @param corners Output vector of detected corners.
2704     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
2705     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2706     * and all detected corners are returned.
2707     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2708     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2709     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2710     * quality measure less than the product are rejected. For example, if the best corner has the
2711     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2712     * less than 15 are rejected.
2713     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2714     * @param mask Optional region of interest. If the mask is not empty (it must have the type
2715     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2716     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
2717     * pixel neighborhood. See cornerEigenValsAndVecs .
2718     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
2719     * or #cornerMinEigenVal.
2720     * @param k Free parameter of the Harris detector.
2721     *
2722     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2723     */
2724    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector, double k) {
2725        Mat corners_mat = corners;
2726        goodFeaturesToTrack_0(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector, k);
2727    }
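
    // Illustrative sketch, not part of the generated bindings: up to 100
    // Shi-Tomasi corners at least 10 px apart; an empty mask means the whole
    // image is searched. All numeric values are assumptions.
    private static MatOfPoint exampleGoodFeaturesToTrack(Mat gray8u) {
        MatOfPoint corners = new MatOfPoint();
        goodFeaturesToTrack(gray8u, corners, 100, 0.01, 10, new Mat(),
                3, false, 0.04);
        return corners;
    }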
2728
2729    /**
2730     * Determines strong corners on an image.
2731     *
2732     * The function finds the most prominent corners in the image or in the specified image region, as
2733     * described in CITE: Shi94
2734     *
2735     * <ul>
2736     *   <li>
2737     *    The function calculates the corner quality measure at every source image pixel using
2738     *     #cornerMinEigenVal or #cornerHarris .
2739     *   </li>
2740     *   <li>
2741     *    The function performs non-maximum suppression (the local maxima in a *3 x 3* neighborhood are
2742     *     retained).
2743     *   </li>
2744     *   <li>
2745     *    The corners with the minimal eigenvalue less than
2746     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2747     *   </li>
2748     *   <li>
2749     *    The remaining corners are sorted by the quality measure in descending order.
2750     *   </li>
2751     *   <li>
2752     *    The function throws away each corner for which there is a stronger corner at a distance less than
2753     *     minDistance.
2754     *   </li>
2755     * </ul>
2756     *
2757     * The function can be used to initialize a point-based tracker of an object.
2758     *
2759     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2760     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2761     * with qualityLevel=B .
2762     *
2763     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2764     * @param corners Output vector of detected corners.
2765     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
2766     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2767     * and all detected corners are returned.
2768     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2769     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2770     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2771     * quality measure less than the product are rejected. For example, if the best corner has the
2772     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2773     * less than 15 are rejected.
2774     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2775     * @param mask Optional region of interest. If the mask is not empty (it must have the type
2776     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2777     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
2778     * pixel neighborhood. See cornerEigenValsAndVecs .
2779     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
2780     * or #cornerMinEigenVal.
2781     *
2782     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2783     */
2784    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector) {
2785        Mat corners_mat = corners;
2786        goodFeaturesToTrack_1(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector);
2787    }
2788
2789    /**
2790     * Determines strong corners on an image.
2791     *
2792     * The function finds the most prominent corners in the image or in the specified image region, as
2793     * described in CITE: Shi94
2794     *
2795     * <ul>
2796     *   <li>
2797     *    The function calculates the corner quality measure at every source image pixel using
2798     *     #cornerMinEigenVal or #cornerHarris .
2799     *   </li>
2800     *   <li>
2801     *    The function performs non-maximum suppression (the local maxima in a *3 x 3* neighborhood are
2802     *     retained).
2803     *   </li>
2804     *   <li>
2805     *    The corners with the minimal eigenvalue less than
2806     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2807     *   </li>
2808     *   <li>
2809     *    The remaining corners are sorted by the quality measure in descending order.
2810     *   </li>
2811     *   <li>
2812     *    The function throws away each corner for which there is a stronger corner at a distance less than
2813     *     minDistance.
2814     *   </li>
2815     * </ul>
2816     *
2817     * The function can be used to initialize a point-based tracker of an object.
2818     *
2819     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2820     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2821     * with qualityLevel=B .
2822     *
2823     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2824     * @param corners Output vector of detected corners.
2825     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
2826     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2827     * and all detected corners are returned.
2828     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2829     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2830     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2831     * quality measure less than the product are rejected. For example, if the best corner has the
2832     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2833     * less than 15 are rejected.
2834     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2835     * @param mask Optional region of interest. If the mask is not empty (it must have the type
2836     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2837     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
2838     * pixel neighborhood. See cornerEigenValsAndVecs .
2839     * By default, the #cornerMinEigenVal quality measure is used ( useHarrisDetector=false ).
2840     *
2841     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2842     */
2843    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize) {
2844        Mat corners_mat = corners;
2845        goodFeaturesToTrack_2(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize);
2846    }
2847
2848    /**
2849     * Determines strong corners on an image.
2850     *
2851     * The function finds the most prominent corners in the image or in the specified image region, as
2852     * described in CITE: Shi94
2853     *
2854     * <ul>
2855     *   <li>
2856     *    The function calculates the corner quality measure at every source image pixel using
2857     *     #cornerMinEigenVal or #cornerHarris .
2858     *   </li>
2859     *   <li>
2860     *    The function performs non-maximum suppression (the local maxima in a *3 x 3* neighborhood are
2861     *     retained).
2862     *   </li>
2863     *   <li>
2864     *    The corners with the minimal eigenvalue less than
2865     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2866     *   </li>
2867     *   <li>
2868     *    The remaining corners are sorted by the quality measure in descending order.
2869     *   </li>
2870     *   <li>
2871     *    The function throws away each corner for which there is a stronger corner at a distance less than
2872     *     minDistance.
2873     *   </li>
2874     * </ul>
2875     *
2876     * The function can be used to initialize a point-based tracker of an object.
2877     *
2878     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2879     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2880     * with qualityLevel=B .
2881     *
2882     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2883     * @param corners Output vector of detected corners.
2884     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
2885     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2886     * and all detected corners are returned.
2887     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2888     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2889     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2890     * quality measure less than the product are rejected. For example, if the best corner has the
2891     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2892     * less than 15 are rejected.
2893     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2894     * @param mask Optional region of interest. If the mask is not empty (it must have the type
2895     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
2896     * By default, a block size of 3 is used and the #cornerMinEigenVal quality measure is applied
2897     * ( useHarrisDetector=false ).
2898     *
2899     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2900     */
2901    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask) {
2902        Mat corners_mat = corners;
2903        goodFeaturesToTrack_3(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj);
2904    }
2905
2906    /**
2907     * Determines strong corners on an image.
2908     *
2909     * The function finds the most prominent corners in the image or in the specified image region, as
2910     * described in CITE: Shi94
2911     *
2912     * <ul>
2913     *   <li>
2914     *    The function calculates the corner quality measure at every source image pixel using
2915     *     #cornerMinEigenVal or #cornerHarris .
2916     *   </li>
2917     *   <li>
2918     *    The function performs non-maximum suppression (the local maxima in a *3 x 3* neighborhood are
2919     *     retained).
2920     *   </li>
2921     *   <li>
2922     *    The corners with the minimal eigenvalue less than
2923     *     \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected.
2924     *   </li>
2925     *   <li>
2926     *    The remaining corners are sorted by the quality measure in descending order.
2927     *   </li>
2928     *   <li>
2929     *    The function throws away each corner for which there is a stronger corner at a distance less than
2930     *     minDistance.
2931     *   </li>
2932     * </ul>
2933     *
2934     * The function can be used to initialize a point-based tracker of an object.
2935     *
2936     * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and
2937     * A &gt; B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
2938     * with qualityLevel=B .
2939     *
2940     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2941     * @param corners Output vector of detected corners.
2942     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
2943     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2944     * and all detected corners are returned.
2945     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2946     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2947     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2948     * quality measure less than the product are rejected. For example, if the best corner has the
2949     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
2950     * less than 15 are rejected.
2951     * @param minDistance Minimum possible Euclidean distance between the returned corners.
2952     * By default, the whole image is searched (empty mask), a block size of 3 is used, and the
2953     * #cornerMinEigenVal quality measure is applied ( useHarrisDetector=false ).
2955     *
2956     * SEE:  cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform,
2957     */
2958    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance) {
2959        Mat corners_mat = corners;
2960        goodFeaturesToTrack_4(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance);
2961    }
2962
2963
2964    //
2965    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04)
2966    //
2967
2968    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, boolean useHarrisDetector, double k) {
2969        Mat corners_mat = corners;
2970        goodFeaturesToTrack_5(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize, useHarrisDetector, k);
2971    }
2972
2973    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, boolean useHarrisDetector) {
2974        Mat corners_mat = corners;
2975        goodFeaturesToTrack_6(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize, useHarrisDetector);
2976    }
2977
2978    public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize) {
2979        Mat corners_mat = corners;
2980        goodFeaturesToTrack_7(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize);
2981    }
2982
2983
2984    //
2985    // C++:  void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04)
2986    //
2987
2988    /**
2989     * Same as above, but returns also quality measure of the detected corners.
2990     *
2991     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
2992     * @param corners Output vector of detected corners.
2993     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
2994     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
2995     * and all detected corners are returned.
2996     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
2997     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
2998     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
2999     * quality measure less than the product are rejected. For example, if the best corner has the
3000     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3001     * less than 15 are rejected.
3002     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3003     * @param mask Region of interest. If the mask is not empty (it must have the type
3004     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3005     * @param cornersQuality Output vector of quality measure of the detected corners.
3006     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
3007     * pixel neighborhood. See cornerEigenValsAndVecs .
3008     * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
3009     * See cornerEigenValsAndVecs .
3010     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
3011     * or #cornerMinEigenVal.
3012     * @param k Free parameter of the Harris detector.
3013     */
3014    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize, boolean useHarrisDetector, double k) {
3015        goodFeaturesToTrackWithQuality_0(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize, useHarrisDetector, k);
3016    }
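
    // Illustrative sketch, not part of the generated bindings: the same
    // detection as the goodFeaturesToTrack example, additionally collecting
    // the per-corner quality scores. All numeric values are assumptions.
    private static void exampleGoodFeaturesWithQuality(Mat gray8u) {
        Mat corners = new Mat();
        Mat quality = new Mat();
        goodFeaturesToTrackWithQuality(gray8u, corners, 100, 0.01, 10,
                new Mat(), quality, 3, 3, false, 0.04);
    }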
3017
3018    /**
3019     * Same as above, but returns also quality measure of the detected corners.
3020     *
3021     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3022     * @param corners Output vector of detected corners.
3023     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
3024     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3025     * and all detected corners are returned.
3026     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3027     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3028     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3029     * quality measure less than the product are rejected. For example, if the best corner has the
3030     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3031     * less than 15 are rejected.
3032     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3033     * @param mask Region of interest. If the mask is not empty (it must have the type
3034     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3035     * @param cornersQuality Output vector of quality measure of the detected corners.
3036     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
3037     * pixel neighborhood. See cornerEigenValsAndVecs .
3038     * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
3039     * See cornerEigenValsAndVecs .
3040     * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
3041     * or #cornerMinEigenVal.
3042     */
3043    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize, boolean useHarrisDetector) {
3044        goodFeaturesToTrackWithQuality_1(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize, useHarrisDetector);
3045    }
3046
3047    /**
3048     * Same as above, but returns also quality measure of the detected corners.
3049     *
3050     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3051     * @param corners Output vector of detected corners.
3052     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
3053     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3054     * and all detected corners are returned.
3055     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3056     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3057     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3058     * quality measure less than the product are rejected. For example, if the best corner has the
3059     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3060     * less than 15 are rejected.
3061     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3062     * @param mask Region of interest. If the mask is not empty (it must have the type
3063     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3064     * @param cornersQuality Output vector of quality measure of the detected corners.
3065     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
3066     * pixel neighborhood. See cornerEigenValsAndVecs .
3067     * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.
3068     * See cornerEigenValsAndVecs .
3069     * By default, the #cornerMinEigenVal quality measure is used ( useHarrisDetector=false ).
3070     */
3071    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize) {
3072        goodFeaturesToTrackWithQuality_2(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize);
3073    }
3074
3075    /**
3076     * Same as above, but returns also quality measure of the detected corners.
3077     *
3078     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3079     * @param corners Output vector of detected corners.
3080     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
3081     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3082     * and all detected corners are returned.
3083     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3084     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3085     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3086     * quality measure less than the product are rejected. For example, if the best corner has the
3087     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3088     * less than 15 are rejected.
3089     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3090     * @param mask Region of interest. If the mask is not empty (it must have the type
3091     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3092     * @param cornersQuality Output vector of quality measure of the detected corners.
3093     * @param blockSize Size of an average block for computing a derivative covariance matrix over each
3094     * pixel neighborhood. See cornerEigenValsAndVecs .
3095     * By default, {@code gradientSize == 3} is used for the Sobel aperture, and the #cornerMinEigenVal
3096     * quality measure is applied ( useHarrisDetector=false ).
3097     */
3098    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize) {
3099        goodFeaturesToTrackWithQuality_3(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize);
3100    }
3101
3102    /**
3103     * Same as above, but returns also quality measure of the detected corners.
3104     *
3105     * @param image Input 8-bit or floating-point 32-bit, single-channel image.
3106     * @param corners Output vector of detected corners.
3107     * @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,
3108     * the strongest of them are returned. {@code maxCorners &lt;= 0} implies that no limit on the maximum is set
3109     * and all detected corners are returned.
3110     * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
3111     * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
3112     * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
3113     * quality measure less than the product are rejected. For example, if the best corner has the
3114     * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
3115     * less than 15 are rejected.
3116     * @param minDistance Minimum possible Euclidean distance between the returned corners.
3117     * @param mask Region of interest. If the mask is not empty (it must have the type
3118     * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
3119     * @param cornersQuality Output vector of quality measure of the detected corners.
3120     * By default, {@code blockSize == 3} and {@code gradientSize == 3} are used, and the
3121     * #cornerMinEigenVal quality measure is applied ( useHarrisDetector=false ).
3123     */
3124    public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality) {
3125        goodFeaturesToTrackWithQuality_4(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj);
3126    }
3127
3128
3129    //
3130    // C++:  void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
3131    //
3132
3133    /**
3134     * Finds lines in a binary image using the standard Hough transform.
3135     *
3136     * The function implements the standard or standard multi-scale Hough transform algorithm for line
3137     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of Hough
3138     * transform.
3139     *
3140     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3141     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3142     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\), where \(\rho\) is the distance from
3143     * the coordinate origin \((0,0)\) (top-left corner of the image), \(\theta\) is the line rotation
3144     * angle in radians ( \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ), and
     * \(\textrm{votes}\) is the value of the accumulator.
     * @param rho Distance resolution of the accumulator in pixels.
     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3149     * votes ( \(&gt;\texttt{threshold}\) ).
3150     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
3151     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3152     * rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
3153     * parameters should be positive.
3154     * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
3155     * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
3156     * Must fall between 0 and max_theta.
3157     * @param max_theta For standard and multi-scale Hough transform, an upper bound for the angle.
3158     * Must fall between min_theta and CV_PI. The actual maximum angle in the accumulator may be slightly
3159     * less than max_theta, depending on the parameters min_theta and theta.
3160     */
3161    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta) {
3162        HoughLines_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta, max_theta);
3163    }
3164
3165    /**
3166     * Finds lines in a binary image using the standard Hough transform.
3167     *
     * The function implements the standard or the multi-scale Hough transform algorithm for line
     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of the
     * Hough transform.
3171     *
3172     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3173     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3174     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\), where \(\rho\) is the distance from
3175     * the coordinate origin \((0,0)\) (top-left corner of the image), \(\theta\) is the line rotation
3176     * angle in radians ( \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ), and
     * \(\textrm{votes}\) is the value of the accumulator.
     * @param rho Distance resolution of the accumulator in pixels.
     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3181     * votes ( \(&gt;\texttt{threshold}\) ).
3182     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
3183     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3184     * rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
3185     * parameters should be positive.
3186     * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
3187     * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.
3188     * Must fall between 0 and max_theta.
3191     */
3192    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta) {
3193        HoughLines_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta);
3194    }
3195
3196    /**
3197     * Finds lines in a binary image using the standard Hough transform.
3198     *
     * The function implements the standard or the multi-scale Hough transform algorithm for line
     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of the
     * Hough transform.
3202     *
3203     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3204     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3205     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\), where \(\rho\) is the distance from
3206     * the coordinate origin \((0,0)\) (top-left corner of the image), \(\theta\) is the line rotation
3207     * angle in radians ( \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ), and
     * \(\textrm{votes}\) is the value of the accumulator.
     * @param rho Distance resolution of the accumulator in pixels.
     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3212     * votes ( \(&gt;\texttt{threshold}\) ).
3213     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
3214     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3215     * rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
3216     * parameters should be positive.
3217     * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.
3221     */
3222    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) {
3223        HoughLines_2(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn);
3224    }
3225
3226    /**
3227     * Finds lines in a binary image using the standard Hough transform.
3228     *
     * The function implements the standard or the multi-scale Hough transform algorithm for line
     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of the
     * Hough transform.
3232     *
3233     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3234     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3235     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\), where \(\rho\) is the distance from
3236     * the coordinate origin \((0,0)\) (top-left corner of the image), \(\theta\) is the line rotation
3237     * angle in radians ( \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ), and
     * \(\textrm{votes}\) is the value of the accumulator.
     * @param rho Distance resolution of the accumulator in pixels.
     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3242     * votes ( \(&gt;\texttt{threshold}\) ).
3243     * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
3244     * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
3245     * rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
3246     * parameters should be positive.
3250     */
3251    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn) {
3252        HoughLines_3(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn);
3253    }
3254
3255    /**
3256     * Finds lines in a binary image using the standard Hough transform.
3257     *
     * The function implements the standard or the multi-scale Hough transform algorithm for line
     * detection. See &lt;http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm&gt; for a good explanation of the
     * Hough transform.
3261     *
3262     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3263     * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector
3264     * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\), where \(\rho\) is the distance from
3265     * the coordinate origin \((0,0)\) (top-left corner of the image), \(\theta\) is the line rotation
3266     * angle in radians ( \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ), and
     * \(\textrm{votes}\) is the value of the accumulator.
     * @param rho Distance resolution of the accumulator in pixels.
     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3271     * votes ( \(&gt;\texttt{threshold}\) ).
3278     */
3279    public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold) {
3280        HoughLines_4(image.nativeObj, lines.nativeObj, rho, theta, threshold);
3281    }
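
    // Usage sketch (illustrative, not part of the generated bindings): run the
    // standard Hough transform on a Canny edge map and turn each (rho, theta) pair
    // into two far-apart endpoints for drawing; line() is this class's own drawing
    // helper. Assumes the OpenCV native library is already loaded.
    private static void houghLinesExample(Mat edges, Mat colorImage) {
        Mat lines = new Mat();
        // 1-pixel rho resolution, 1-degree theta resolution, 150-vote threshold
        HoughLines(edges, lines, 1.0, Math.PI / 180.0, 150);
        for (int i = 0; i < lines.rows(); i++) {
            double[] l = lines.get(i, 0);          // l[0] = rho, l[1] = theta
            double a = Math.cos(l[1]), b = Math.sin(l[1]);
            double x0 = a * l[0], y0 = b * l[0];   // point on the line closest to the origin
            Point p1 = new Point(x0 - 2000 * b, y0 + 2000 * a);
            Point p2 = new Point(x0 + 2000 * b, y0 - 2000 * a);
            line(colorImage, p1, p2, new Scalar(0, 0, 255), 2);
        }
    }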
3282
3283
3284    //
3285    // C++:  void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0)
3286    //
3287
3288    /**
3289     * Finds line segments in a binary image using the probabilistic Hough transform.
3290     *
3291     * The function implements the probabilistic Hough transform algorithm for line detection, described
3292     * in CITE: Matas00
3293     *
3294     * See the line detection example below:
3295     * INCLUDE: snippets/imgproc_HoughLinesP.cpp
3296     * This is a sample picture the function parameters have been tuned for:
3297     *
3298     * ![image](pics/building.jpg)
3299     *
3300     * And this is the output of the above program in case of the probabilistic Hough transform:
3301     *
3302     * ![image](pics/houghp.png)
3303     *
3304     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3305     * @param lines Output vector of lines. Each line is represented by a 4-element vector
3306     * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected
3307     * line segment.
3308     * @param rho Distance resolution of the accumulator in pixels.
3309     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3311     * votes ( \(&gt;\texttt{threshold}\) ).
3312     * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
3313     * @param maxLineGap Maximum allowed gap between points on the same line to link them.
3314     *
3315     * SEE: LineSegmentDetector
3316     */
3317    public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) {
3318        HoughLinesP_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength, maxLineGap);
3319    }
3320
3321    /**
3322     * Finds line segments in a binary image using the probabilistic Hough transform.
3323     *
3324     * The function implements the probabilistic Hough transform algorithm for line detection, described
3325     * in CITE: Matas00
3326     *
3327     * See the line detection example below:
3328     * INCLUDE: snippets/imgproc_HoughLinesP.cpp
3329     * This is a sample picture the function parameters have been tuned for:
3330     *
3331     * ![image](pics/building.jpg)
3332     *
3333     * And this is the output of the above program in case of the probabilistic Hough transform:
3334     *
3335     * ![image](pics/houghp.png)
3336     *
3337     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3338     * @param lines Output vector of lines. Each line is represented by a 4-element vector
3339     * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected
3340     * line segment.
3341     * @param rho Distance resolution of the accumulator in pixels.
3342     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3344     * votes ( \(&gt;\texttt{threshold}\) ).
3345     * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
3346     *
3347     * SEE: LineSegmentDetector
3348     */
3349    public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength) {
3350        HoughLinesP_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength);
3351    }
3352
3353    /**
3354     * Finds line segments in a binary image using the probabilistic Hough transform.
3355     *
3356     * The function implements the probabilistic Hough transform algorithm for line detection, described
3357     * in CITE: Matas00
3358     *
3359     * See the line detection example below:
3360     * INCLUDE: snippets/imgproc_HoughLinesP.cpp
3361     * This is a sample picture the function parameters have been tuned for:
3362     *
3363     * ![image](pics/building.jpg)
3364     *
3365     * And this is the output of the above program in case of the probabilistic Hough transform:
3366     *
3367     * ![image](pics/houghp.png)
3368     *
3369     * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
3370     * @param lines Output vector of lines. Each line is represented by a 4-element vector
3371     * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected
3372     * line segment.
3373     * @param rho Distance resolution of the accumulator in pixels.
3374     * @param theta Angle resolution of the accumulator in radians.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3376     * votes ( \(&gt;\texttt{threshold}\) ).
3377     *
3378     * SEE: LineSegmentDetector
3379     */
3380    public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold) {
3381        HoughLinesP_2(image.nativeObj, lines.nativeObj, rho, theta, threshold);
3382    }
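
    // Usage sketch (illustrative, not part of the generated bindings): the
    // probabilistic transform returns ready-made segment endpoints, so no
    // (rho, theta) conversion is needed before drawing with line().
    private static void houghLinesPExample(Mat edges, Mat colorImage) {
        Mat lines = new Mat();
        // 50-vote threshold; keep segments >= 30 px long, bridge gaps <= 10 px
        HoughLinesP(edges, lines, 1.0, Math.PI / 180.0, 50, 30.0, 10.0);
        for (int i = 0; i < lines.rows(); i++) {
            double[] l = lines.get(i, 0);  // x1, y1, x2, y2
            line(colorImage, new Point(l[0], l[1]), new Point(l[2], l[3]), new Scalar(0, 255, 0), 2);
        }
    }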
3383
3384
3385    //
3386    // C++:  void cv::HoughLinesPointSet(Mat point, Mat& lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step)
3387    //
3388
3389    /**
3390     * Finds lines in a set of points using the standard Hough transform.
3391     *
3392     * The function finds lines in a set of points using a modification of the Hough transform.
3393     * INCLUDE: snippets/imgproc_HoughLinesPointSet.cpp
     * @param point Input vector of points. Each point must be encoded as \((x,y)\); the Mat type must be CV_32FC2 or CV_32SC2.
3395     * @param lines Output vector of found lines. Each vector is encoded as a vector&lt;Vec3d&gt; \((votes, rho, theta)\).
3396     * The larger the value of 'votes', the higher the reliability of the Hough line.
     * @param lines_max Maximum number of Hough lines to return.
     * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
3399     * votes ( \(&gt;\texttt{threshold}\) ).
3400     * @param min_rho Minimum value for \(\rho\) for the accumulator (Note: \(\rho\) can be negative. The absolute value \(|\rho|\) is the distance of a line to the origin.).
3401     * @param max_rho Maximum value for \(\rho\) for the accumulator.
3402     * @param rho_step Distance resolution of the accumulator.
3403     * @param min_theta Minimum angle value of the accumulator in radians.
3404     * @param max_theta Upper bound for the angle value of the accumulator in radians. The actual maximum
3405     * angle may be slightly less than max_theta, depending on the parameters min_theta and theta_step.
3406     * @param theta_step Angle resolution of the accumulator in radians.
3407     */
3408    public static void HoughLinesPointSet(Mat point, Mat lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step) {
3409        HoughLinesPointSet_0(point.nativeObj, lines.nativeObj, lines_max, threshold, min_rho, max_rho, rho_step, min_theta, max_theta, theta_step);
3410    }
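
    // Usage sketch (illustrative, not part of the generated bindings): fit a line to
    // a bare point set. MatOfPoint2f provides the required CV_32FC2 layout; each
    // output row is a (votes, rho, theta) triple, strongest line first.
    private static void houghLinesPointSetExample() {
        MatOfPoint2f points = new MatOfPoint2f(
                new Point(0, 0), new Point(10, 11), new Point(20, 19), new Point(30, 30));
        Mat lines = new Mat();
        HoughLinesPointSet(points, lines, 10, 2,
                0.0, 100.0, 1.0,                       // rho in [0, 100], step 1
                0.0, Math.PI / 2.0, Math.PI / 180.0);  // theta in [0, pi/2], 1-degree step
        if (lines.rows() > 0) {
            double[] best = lines.get(0, 0);
            System.out.println("votes=" + best[0] + ", rho=" + best[1] + ", theta=" + best[2]);
        }
    }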
3411
3412
3413    //
3414    // C++:  void cv::HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
3415    //
3416
3417    /**
3418     * Finds circles in a grayscale image using the Hough transform.
3419     *
3420     * The function finds circles in a grayscale image using a modification of the Hough transform.
3421     *
     * Example:
3423     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3424     *
     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
     * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
     * you know it. Or, in the case of the #HOUGH_GRADIENT method you may set maxRadius to a negative number
     * to return centers only, without the radius search, and find the correct radius using an additional procedure.
     *
     * It also helps to smooth the image a bit unless it's already soft. For example,
     * GaussianBlur() with a 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3432     *
3433     * @param image 8-bit, single-channel, grayscale input image.
     * @param circles Output vector of found circles. Each vector is encoded as a 3 or 4 element
     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\).
3436     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3437     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3438     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
     * half the width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
     * unless some very small circles need to be detected.
3441     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3442     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3443     * too large, some circles may be missed.
     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is half as large).
     * Note that #HOUGH_GRADIENT_ALT uses the #Scharr algorithm to compute image derivatives, so the threshold
     * value should normally be higher, such as 300, for normally exposed and contrasty images.
     * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
     * false circles may be detected. Circles corresponding to the larger accumulator values will be
     * returned first. In the case of the #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
     * The closer it is to 1, the better-shaped circles the algorithm selects. In most cases 0.9 should be fine.
     * If you want better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3455     * @param minRadius Minimum circle radius.
     * @param maxRadius Maximum circle radius. If &lt;= 0, uses the maximum image dimension. If &lt; 0, #HOUGH_GRADIENT returns
     * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radii.
3458     *
3459     * SEE: fitEllipse, minEnclosingCircle
3460     */
3461    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius) {
3462        HoughCircles_0(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius, maxRadius);
3463    }
3464
3465    /**
3466     * Finds circles in a grayscale image using the Hough transform.
3467     *
3468     * The function finds circles in a grayscale image using a modification of the Hough transform.
3469     *
     * Example:
3471     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3472     *
     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
     * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
     * you know it. Or, in the case of the #HOUGH_GRADIENT method you may set maxRadius to a negative number
     * to return centers only, without the radius search, and find the correct radius using an additional procedure.
     *
     * It also helps to smooth the image a bit unless it's already soft. For example,
     * GaussianBlur() with a 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3480     *
3481     * @param image 8-bit, single-channel, grayscale input image.
     * @param circles Output vector of found circles. Each vector is encoded as a 3 or 4 element
     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\).
3484     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3485     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3486     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
     * half the width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
     * unless some very small circles need to be detected.
3489     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3490     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3491     * too large, some circles may be missed.
     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is half as large).
     * Note that #HOUGH_GRADIENT_ALT uses the #Scharr algorithm to compute image derivatives, so the threshold
     * value should normally be higher, such as 300, for normally exposed and contrasty images.
     * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
     * false circles may be detected. Circles corresponding to the larger accumulator values will be
     * returned first. In the case of the #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
     * The closer it is to 1, the better-shaped circles the algorithm selects. In most cases 0.9 should be fine.
     * If you want better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3503     * @param minRadius Minimum circle radius.
3505     *
3506     * SEE: fitEllipse, minEnclosingCircle
3507     */
3508    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius) {
3509        HoughCircles_1(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius);
3510    }
3511
3512    /**
3513     * Finds circles in a grayscale image using the Hough transform.
3514     *
3515     * The function finds circles in a grayscale image using a modification of the Hough transform.
3516     *
     * Example:
3518     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3519     *
     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
     * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
     * you know it. Or, in the case of the #HOUGH_GRADIENT method you may set maxRadius to a negative number
     * to return centers only, without the radius search, and find the correct radius using an additional procedure.
     *
     * It also helps to smooth the image a bit unless it's already soft. For example,
     * GaussianBlur() with a 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3527     *
3528     * @param image 8-bit, single-channel, grayscale input image.
     * @param circles Output vector of found circles. Each vector is encoded as a 3 or 4 element
     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\).
3531     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3532     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3533     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
     * half the width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
     * unless some very small circles need to be detected.
3536     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3537     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3538     * too large, some circles may be missed.
     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is half as large).
     * Note that #HOUGH_GRADIENT_ALT uses the #Scharr algorithm to compute image derivatives, so the threshold
     * value should normally be higher, such as 300, for normally exposed and contrasty images.
     * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the
     * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more
     * false circles may be detected. Circles corresponding to the larger accumulator values will be
     * returned first. In the case of the #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure.
     * The closer it is to 1, the better-shaped circles the algorithm selects. In most cases 0.9 should be fine.
     * If you want better detection of small circles, you may decrease it to 0.85, 0.8 or even less.
     * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.
3551     *
3552     * SEE: fitEllipse, minEnclosingCircle
3553     */
3554    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2) {
3555        HoughCircles_2(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2);
3556    }
3557
3558    /**
3559     * Finds circles in a grayscale image using the Hough transform.
3560     *
3561     * The function finds circles in a grayscale image using a modification of the Hough transform.
3562     *
     * Example:
3564     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3565     *
     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
     * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
     * you know it. Or, in the case of the #HOUGH_GRADIENT method you may set maxRadius to a negative number
     * to return centers only, without the radius search, and find the correct radius using an additional procedure.
     *
     * It also helps to smooth the image a bit unless it's already soft. For example,
     * GaussianBlur() with a 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3573     *
3574     * @param image 8-bit, single-channel, grayscale input image.
     * @param circles Output vector of found circles. Each vector is encoded as a 3 or 4 element
     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\).
3577     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3578     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3579     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
     * half the width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
     * unless some very small circles need to be detected.
3582     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3583     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3584     * too large, some circles may be missed.
     * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,
     * it is the higher threshold of the two passed to the Canny edge detector (the lower one is half as large).
     * Note that #HOUGH_GRADIENT_ALT uses the #Scharr algorithm to compute image derivatives, so the threshold
     * value should normally be higher, such as 300, for normally exposed and contrasty images.
3596     *
3597     * SEE: fitEllipse, minEnclosingCircle
3598     */
3599    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1) {
3600        HoughCircles_3(image.nativeObj, circles.nativeObj, method, dp, minDist, param1);
3601    }
3602
3603    /**
3604     * Finds circles in a grayscale image using the Hough transform.
3605     *
3606     * The function finds circles in a grayscale image using a modification of the Hough transform.
3607     *
     * Example:
3609     * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp
3610     *
     * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct
     * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
     * you know it. Or, in the case of the #HOUGH_GRADIENT method you may set maxRadius to a negative number
     * to return centers only, without the radius search, and find the correct radius using an additional procedure.
     *
     * It also helps to smooth the image a bit unless it's already soft. For example,
     * GaussianBlur() with a 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.
3618     *
3619     * @param image 8-bit, single-channel, grayscale input image.
     * @param circles Output vector of found circles. Each vector is encoded as a 3 or 4 element
     * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\).
3622     * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.
3623     * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if
3624     * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has
     * half the width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,
     * unless some very small circles need to be detected.
3627     * @param minDist Minimum distance between the centers of the detected circles. If the parameter is
3628     * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is
3629     * too large, some circles may be missed.
3640     *
3641     * SEE: fitEllipse, minEnclosingCircle
3642     */
3643    public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist) {
3644        HoughCircles_4(image.nativeObj, circles.nativeObj, method, dp, minDist);
3645    }
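
    // Usage sketch (illustrative, not part of the generated bindings): blur first,
    // then detect. The output Mat is 1 x N with an (x, y, radius) triple per circle;
    // HOUGH_GRADIENT and GaussianBlur are this class's own members.
    private static void houghCirclesExample(Mat gray) {
        Mat blurred = new Mat();
        GaussianBlur(gray, blurred, new Size(7, 7), 1.5, 1.5);
        Mat circles = new Mat();
        // dp=1, centers at least rows/8 apart, Canny high threshold 100,
        // accumulator threshold 30, radii restricted to [10, 80] px
        HoughCircles(blurred, circles, HOUGH_GRADIENT, 1.0, gray.rows() / 8.0, 100, 30, 10, 80);
        for (int i = 0; i < circles.cols(); i++) {
            double[] c = circles.get(0, i);
            System.out.println("center (" + c[0] + ", " + c[1] + "), radius " + c[2]);
        }
    }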
3646
3647
3648    //
3649    // C++:  void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
3650    //
3651
3652    /**
3653     * Erodes an image by using a specific structuring element.
3654     *
3655     * The function erodes the source image using the specified structuring element that determines the
3656     * shape of a pixel neighborhood over which the minimum is taken:
3657     *
3658     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3659     *
3660     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3661     * case of multi-channel images, each channel is processed independently.
3662     *
3663     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3664     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3665     * @param dst output image of the same size and type as src.
3666     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3667     * structuring element is used. Kernel can be created using #getStructuringElement.
3668     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3669     * anchor is at the element center.
3670     * @param iterations number of times erosion is applied.
3671     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
     * @param borderValue border value in case of a constant border.
3673     * SEE:  dilate, morphologyEx, getStructuringElement
3674     */
3675    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
3676        erode_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
3677    }
3678
3679    /**
3680     * Erodes an image by using a specific structuring element.
3681     *
3682     * The function erodes the source image using the specified structuring element that determines the
3683     * shape of a pixel neighborhood over which the minimum is taken:
3684     *
3685     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3686     *
3687     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3688     * case of multi-channel images, each channel is processed independently.
3689     *
3690     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3691     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3692     * @param dst output image of the same size and type as src.
3693     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3694     * structuring element is used. Kernel can be created using #getStructuringElement.
3695     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3696     * anchor is at the element center.
3697     * @param iterations number of times erosion is applied.
3698     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3699     * SEE:  dilate, morphologyEx, getStructuringElement
3700     */
3701    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) {
3702        erode_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType);
3703    }
3704
3705    /**
3706     * Erodes an image by using a specific structuring element.
3707     *
3708     * The function erodes the source image using the specified structuring element that determines the
3709     * shape of a pixel neighborhood over which the minimum is taken:
3710     *
3711     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3712     *
3713     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3714     * case of multi-channel images, each channel is processed independently.
3715     *
3716     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3717     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3718     * @param dst output image of the same size and type as src.
3719     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3720     * structuring element is used. Kernel can be created using #getStructuringElement.
3721     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3722     * anchor is at the element center.
3723     * @param iterations number of times erosion is applied.
3724     * SEE:  dilate, morphologyEx, getStructuringElement
3725     */
3726    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) {
3727        erode_2(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations);
3728    }
3729
3730    /**
3731     * Erodes an image by using a specific structuring element.
3732     *
3733     * The function erodes the source image using the specified structuring element that determines the
3734     * shape of a pixel neighborhood over which the minimum is taken:
3735     *
3736     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3737     *
3738     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3739     * case of multi-channel images, each channel is processed independently.
3740     *
3741     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3742     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3743     * @param dst output image of the same size and type as src.
3744     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3745     * structuring element is used. Kernel can be created using #getStructuringElement.
3746     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3747     * anchor is at the element center.
3748     * SEE:  dilate, morphologyEx, getStructuringElement
3749     */
3750    public static void erode(Mat src, Mat dst, Mat kernel, Point anchor) {
3751        erode_3(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y);
3752    }
3753
3754    /**
3755     * Erodes an image by using a specific structuring element.
3756     *
3757     * The function erodes the source image using the specified structuring element that determines the
3758     * shape of a pixel neighborhood over which the minimum is taken:
3759     *
3760     * \(\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3761     *
3762     * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In
3763     * case of multi-channel images, each channel is processed independently.
3764     *
3765     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3766     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3767     * @param dst output image of the same size and type as src.
3768     * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular
3769     * structuring element is used. Kernel can be created using #getStructuringElement.
3771     * SEE:  dilate, morphologyEx, getStructuringElement
3772     */
3773    public static void erode(Mat src, Mat dst, Mat kernel) {
3774        erode_4(src.nativeObj, dst.nativeObj, kernel.nativeObj);
3775    }
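
    // Usage sketch (illustrative, not part of the generated bindings): erode a binary
    // mask with a 5x5 elliptical structuring element, applied twice; the (-1, -1)
    // anchor keeps the anchor at the kernel center.
    private static void erodeExample(Mat binaryMask) {
        Mat kernel = getStructuringElement(MORPH_ELLIPSE, new Size(5, 5));
        Mat eroded = new Mat();
        erode(binaryMask, eroded, kernel, new Point(-1, -1), 2);
    }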
3776
3777
3778    //
3779    // C++:  void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
3780    //
3781
3782    /**
3783     * Dilates an image by using a specific structuring element.
3784     *
3785     * The function dilates the source image using the specified structuring element that determines the
3786     * shape of a pixel neighborhood over which the maximum is taken:
3787     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3788     *
3789     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3790     * case of multi-channel images, each channel is processed independently.
3791     *
3792     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3793     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3794     * @param dst output image of the same size and type as src.
3795     * @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
3796     * structuring element is used. Kernel can be created using #getStructuringElement
3797     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3798     * anchor is at the element center.
3799     * @param iterations number of times dilation is applied.
     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
     * @param borderValue border value in case of a constant border.
3802     * SEE:  erode, morphologyEx, getStructuringElement
3803     */
3804    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
3805        dilate_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
3806    }
3807
3808    /**
3809     * Dilates an image by using a specific structuring element.
3810     *
3811     * The function dilates the source image using the specified structuring element that determines the
3812     * shape of a pixel neighborhood over which the maximum is taken:
3813     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3814     *
3815     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3816     * case of multi-channel images, each channel is processed independently.
3817     *
3818     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3819     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3820     * @param dst output image of the same size and type as src.
3821     * @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
3822     * structuring element is used. Kernel can be created using #getStructuringElement
3823     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3824     * anchor is at the element center.
3825     * @param iterations number of times dilation is applied.
     * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3827     * SEE:  erode, morphologyEx, getStructuringElement
3828     */
3829    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) {
3830        dilate_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType);
3831    }
3832
3833    /**
3834     * Dilates an image by using a specific structuring element.
3835     *
3836     * The function dilates the source image using the specified structuring element that determines the
3837     * shape of a pixel neighborhood over which the maximum is taken:
3838     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3839     *
3840     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3841     * case of multi-channel images, each channel is processed independently.
3842     *
3843     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3844     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3845     * @param dst output image of the same size and type as src.
3846     * @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
3847     * structuring element is used. Kernel can be created using #getStructuringElement
3848     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3849     * anchor is at the element center.
3850     * @param iterations number of times dilation is applied.
3851     * SEE:  erode, morphologyEx, getStructuringElement
3852     */
3853    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) {
3854        dilate_2(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations);
3855    }
3856
3857    /**
3858     * Dilates an image by using a specific structuring element.
3859     *
3860     * The function dilates the source image using the specified structuring element that determines the
3861     * shape of a pixel neighborhood over which the maximum is taken:
3862     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3863     *
3864     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3865     * case of multi-channel images, each channel is processed independently.
3866     *
3867     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3868     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3869     * @param dst output image of the same size and type as src.
3870     * @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
3871     * structuring element is used. Kernel can be created using #getStructuringElement
3872     * @param anchor position of the anchor within the element; default value (-1, -1) means that the
3873     * anchor is at the element center.
3874     * SEE:  erode, morphologyEx, getStructuringElement
3875     */
3876    public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor) {
3877        dilate_3(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y);
3878    }
3879
3880    /**
3881     * Dilates an image by using a specific structuring element.
3882     *
3883     * The function dilates the source image using the specified structuring element that determines the
3884     * shape of a pixel neighborhood over which the maximum is taken:
3885     * \(\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\)
3886     *
3887     * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In
3888     * case of multi-channel images, each channel is processed independently.
3889     *
3890     * @param src input image; the number of channels can be arbitrary, but the depth should be one of
3891     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3892     * @param dst output image of the same size and type as src.
3893     * @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular
3894     * structuring element is used. Kernel can be created using #getStructuringElement
3896     * SEE:  erode, morphologyEx, getStructuringElement
3897     */
3898    public static void dilate(Mat src, Mat dst, Mat kernel) {
3899        dilate_4(src.nativeObj, dst.nativeObj, kernel.nativeObj);
3900    }
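
    // Usage sketch (illustrative, not part of the generated bindings): an empty Mat
    // kernel selects the default 3 x 3 rectangle; a cross-shaped kernel from
    // getStructuringElement spreads the dilation further.
    private static void dilateExample(Mat binaryMask) {
        Mat dilated = new Mat();
        dilate(binaryMask, dilated, new Mat());  // default 3x3 rectangular kernel
        Mat cross = getStructuringElement(MORPH_CROSS, new Size(7, 7));
        dilate(binaryMask, dilated, cross);      // wider, cross-shaped dilation
    }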
3901
3902
3903    //
3904    // C++:  void cv::morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
3905    //
3906
3907    /**
3908     * Performs advanced morphological transformations.
3909     *
3910     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3911     * basic operations.
3912     *
3913     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3914     * processed independently.
3915     *
3916     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3917     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3918     * @param dst Destination image of the same size and type as source image.
3919     * @param op Type of a morphological operation, see #MorphTypes
3920     * @param kernel Structuring element. It can be created using #getStructuringElement.
     * @param anchor Anchor position within the kernel. Negative values mean that the anchor is at the
3922     * kernel center.
3923     * @param iterations Number of times erosion and dilation are applied.
3924     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3925     * @param borderValue Border value in case of a constant border. The default value has a special
3926     * meaning.
3927     * SEE:  dilate, erode, getStructuringElement
     * <b>Note:</b> The number of iterations is the number of times the erosion or dilation operation will be applied.
     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to applying
     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3931     */
3932    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) {
3933        morphologyEx_0(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
3934    }
3935
3936    /**
3937     * Performs advanced morphological transformations.
3938     *
3939     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3940     * basic operations.
3941     *
3942     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3943     * processed independently.
3944     *
3945     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3946     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3947     * @param dst Destination image of the same size and type as source image.
3948     * @param op Type of a morphological operation, see #MorphTypes
3949     * @param kernel Structuring element. It can be created using #getStructuringElement.
     * @param anchor Anchor position within the kernel. Negative values mean that the anchor is at the
3951     * kernel center.
3952     * @param iterations Number of times erosion and dilation are applied.
3953     * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
3955     * SEE:  dilate, erode, getStructuringElement
     * <b>Note:</b> The number of iterations is the number of times the erosion or dilation operation will be applied.
     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to applying
     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3959     */
3960    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType) {
3961        morphologyEx_1(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType);
3962    }
3963
3964    /**
3965     * Performs advanced morphological transformations.
3966     *
3967     * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as
3968     * basic operations.
3969     *
3970     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3971     * processed independently.
3972     *
3973     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
3974     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
3975     * @param dst Destination image of the same size and type as source image.
3976     * @param op Type of a morphological operation, see #MorphTypes
3977     * @param kernel Structuring element. It can be created using #getStructuringElement.
3978     * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
3979     * kernel center.
3980     * @param iterations Number of times erosion and dilation are applied.
     * SEE:  dilate, erode, getStructuringElement
     * <b>Note:</b> The number of iterations is the number of times the erosion or dilation operation is applied.
     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to applying
     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
3986     */
3987    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations) {
3988        morphologyEx_2(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations);
3989    }
3990
3991    /**
3992     * Performs advanced morphological transformations.
3993     *
     * The function cv::morphologyEx can perform advanced morphological transformations using erosion and dilation as
3995     * basic operations.
3996     *
3997     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
3998     * processed independently.
3999     *
4000     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
4001     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
4002     * @param dst Destination image of the same size and type as source image.
4003     * @param op Type of a morphological operation, see #MorphTypes
4004     * @param kernel Structuring element. It can be created using #getStructuringElement.
4005     * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
4006     * kernel center.
     * SEE:  dilate, erode, getStructuringElement
     * <b>Note:</b> The number of iterations is the number of times the erosion or dilation operation is applied.
     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to applying
     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
4012     */
4013    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor) {
4014        morphologyEx_3(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y);
4015    }
4016
4017    /**
4018     * Performs advanced morphological transformations.
4019     *
     * The function cv::morphologyEx can perform advanced morphological transformations using erosion and dilation as
4021     * basic operations.
4022     *
4023     * Any of the operations can be done in-place. In case of multi-channel images, each channel is
4024     * processed independently.
4025     *
4026     * @param src Source image. The number of channels can be arbitrary. The depth should be one of
4027     * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
4028     * @param dst Destination image of the same size and type as source image.
4029     * @param op Type of a morphological operation, see #MorphTypes
4030     * @param kernel Structuring element. It can be created using #getStructuringElement.
     * SEE:  dilate, erode, getStructuringElement
     * <b>Note:</b> The number of iterations is the number of times the erosion or dilation operation is applied.
     * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to applying
     * successively: erode -&gt; erode -&gt; dilate -&gt; dilate (and not erode -&gt; dilate -&gt; erode -&gt; dilate).
4037     */
4038    public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel) {
4039        morphologyEx_4(src.nativeObj, dst.nativeObj, op, kernel.nativeObj);
4040    }
4041
4042
4043    //
4044    // C++:  void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
4045    //
4046
4047    /**
4048     * Resizes an image.
4049     *
4050     * The function resize resizes the image src down to or up to the specified size. Note that the
4051     * initial dst type or size are not taken into account. Instead, the size and type are derived from
     * the {@code src}, {@code dsize}, {@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4053     * you may call the function as follows:
4054     * <code>
4055     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4056     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4057     * </code>
4058     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4059     * way:
4060     * <code>
4061     *     // specify fx and fy and let the function compute the destination image size.
4062     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4063     * </code>
4064     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4065     * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
4066     * (faster but still looks OK).
4067     *
4068     * @param src input image.
4069     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4070     * src.size(), fx, and fy; the type of dst is the same as of src.
4071     * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as:
4072     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4073     *  Either dsize or both fx and fy must be non-zero.
4074     * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
4075     * \(\texttt{(double)dsize.width/src.cols}\)
4076     * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
4077     * \(\texttt{(double)dsize.height/src.rows}\)
4078     * @param interpolation interpolation method, see #InterpolationFlags
4079     *
4080     * SEE:  warpAffine, warpPerspective, remap
4081     */
4082    public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interpolation) {
4083        resize_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy, interpolation);
4084    }
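
    /*
     * Editor's sketch (not generated code): the two calling styles described
     * above, assuming a Mat "srcImage" loaded elsewhere.
     *
     *     Mat half = new Mat();
     *     // Scale factors only; dsize is computed as Size(round(0.5*cols), round(0.5*rows)).
     *     Imgproc.resize(srcImage, half, new Size(), 0.5, 0.5, Imgproc.INTER_AREA);
     *
     *     Mat fixed = new Mat();
     *     // Explicit dsize; fx and fy are computed from it.
     *     Imgproc.resize(srcImage, fixed, new Size(640, 480), 0, 0, Imgproc.INTER_LINEAR);
     */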
4085
4086    /**
4087     * Resizes an image.
4088     *
4089     * The function resize resizes the image src down to or up to the specified size. Note that the
4090     * initial dst type or size are not taken into account. Instead, the size and type are derived from
     * the {@code src}, {@code dsize}, {@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4092     * you may call the function as follows:
4093     * <code>
4094     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4095     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4096     * </code>
4097     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4098     * way:
4099     * <code>
4100     *     // specify fx and fy and let the function compute the destination image size.
4101     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4102     * </code>
4103     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4104     * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
4105     * (faster but still looks OK).
4106     *
4107     * @param src input image.
4108     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4109     * src.size(), fx, and fy; the type of dst is the same as of src.
4110     * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as:
4111     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4112     *  Either dsize or both fx and fy must be non-zero.
4113     * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
4114     * \(\texttt{(double)dsize.width/src.cols}\)
4115     * @param fy scale factor along the vertical axis; when it equals 0, it is computed as
4116     * \(\texttt{(double)dsize.height/src.rows}\)
4117     *
4118     * SEE:  warpAffine, warpPerspective, remap
4119     */
4120    public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy) {
4121        resize_1(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy);
4122    }
4123
4124    /**
4125     * Resizes an image.
4126     *
4127     * The function resize resizes the image src down to or up to the specified size. Note that the
4128     * initial dst type or size are not taken into account. Instead, the size and type are derived from
     * the {@code src}, {@code dsize}, {@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4130     * you may call the function as follows:
4131     * <code>
4132     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4133     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4134     * </code>
4135     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4136     * way:
4137     * <code>
4138     *     // specify fx and fy and let the function compute the destination image size.
4139     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4140     * </code>
4141     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4142     * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
4143     * (faster but still looks OK).
4144     *
4145     * @param src input image.
4146     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4147     * src.size(), fx, and fy; the type of dst is the same as of src.
4148     * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as:
4149     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4150     *  Either dsize or both fx and fy must be non-zero.
4151     * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as
4152     * \(\texttt{(double)dsize.width/src.cols}\)
4154     *
4155     * SEE:  warpAffine, warpPerspective, remap
4156     */
4157    public static void resize(Mat src, Mat dst, Size dsize, double fx) {
4158        resize_2(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx);
4159    }
4160
4161    /**
4162     * Resizes an image.
4163     *
4164     * The function resize resizes the image src down to or up to the specified size. Note that the
4165     * initial dst type or size are not taken into account. Instead, the size and type are derived from
     * the {@code src}, {@code dsize}, {@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst,
4167     * you may call the function as follows:
4168     * <code>
4169     *     // explicitly specify dsize=dst.size(); fx and fy will be computed from that.
4170     *     resize(src, dst, dst.size(), 0, 0, interpolation);
4171     * </code>
4172     * If you want to decimate the image by factor of 2 in each direction, you can call the function this
4173     * way:
4174     * <code>
4175     *     // specify fx and fy and let the function compute the destination image size.
4176     *     resize(src, dst, Size(), 0.5, 0.5, interpolation);
4177     * </code>
4178     * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to
4179     * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR
4180     * (faster but still looks OK).
4181     *
4182     * @param src input image.
4183     * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from
4184     * src.size(), fx, and fy; the type of dst is the same as of src.
4185     * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as:
4186     *  \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\)
4187     *  Either dsize or both fx and fy must be non-zero.
4190     *
4191     * SEE:  warpAffine, warpPerspective, remap
4192     */
4193    public static void resize(Mat src, Mat dst, Size dsize) {
4194        resize_3(src.nativeObj, dst.nativeObj, dsize.width, dsize.height);
4195    }
4196
4197
4198    //
4199    // C++:  void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
4200    //
4201
4202    /**
4203     * Applies an affine transformation to an image.
4204     *
4205     * The function warpAffine transforms the source image using the specified matrix:
4206     *
4207     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4208     *
4209     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4210     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4211     * operate in-place.
4212     *
4213     * @param src input image.
4214     * @param dst output image that has the size dsize and the same type as src .
4215     * @param M \(2\times 3\) transformation matrix.
4216     * @param dsize size of the output image.
4217     * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
4218     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4219     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4220     * @param borderMode pixel extrapolation method (see #BorderTypes); when
4221     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
4222     * the "outliers" in the source image are not modified by the function.
4223     * @param borderValue value used in case of a constant border; by default, it is 0.
4224     *
4225     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4226     */
4227    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) {
4228        warpAffine_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
4229    }
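
    /*
     * Editor's sketch (not generated code): a pure translation expressed as a
     * 2x3 matrix, assuming a Mat "srcImage" and an imported org.opencv.core.CvType.
     *
     *     Mat M = Mat.eye(2, 3, CvType.CV_64F);
     *     M.put(0, 2, 50.0);  // shift 50 px right
     *     M.put(1, 2, 20.0);  // shift 20 px down
     *     Mat shifted = new Mat();
     *     Imgproc.warpAffine(srcImage, shifted, M, srcImage.size());
     */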
4230
4231    /**
4232     * Applies an affine transformation to an image.
4233     *
4234     * The function warpAffine transforms the source image using the specified matrix:
4235     *
4236     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4237     *
4238     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4239     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4240     * operate in-place.
4241     *
4242     * @param src input image.
4243     * @param dst output image that has the size dsize and the same type as src .
4244     * @param M \(2\times 3\) transformation matrix.
4245     * @param dsize size of the output image.
4246     * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
4247     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4248     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4249     * @param borderMode pixel extrapolation method (see #BorderTypes); when
4250     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to
4251     * the "outliers" in the source image are not modified by the function.
4252     *
4253     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4254     */
4255    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode) {
4256        warpAffine_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode);
4257    }
4258
4259    /**
4260     * Applies an affine transformation to an image.
4261     *
4262     * The function warpAffine transforms the source image using the specified matrix:
4263     *
4264     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4265     *
4266     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4267     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4268     * operate in-place.
4269     *
4270     * @param src input image.
4271     * @param dst output image that has the size dsize and the same type as src .
4272     * @param M \(2\times 3\) transformation matrix.
4273     * @param dsize size of the output image.
4274     * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional
4275     * flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
4276     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4279     *
4280     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4281     */
4282    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags) {
4283        warpAffine_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
4284    }
4285
4286    /**
4287     * Applies an affine transformation to an image.
4288     *
4289     * The function warpAffine transforms the source image using the specified matrix:
4290     *
4291     * \(\texttt{dst} (x,y) =  \texttt{src} ( \texttt{M} _{11} x +  \texttt{M} _{12} y +  \texttt{M} _{13}, \texttt{M} _{21} x +  \texttt{M} _{22} y +  \texttt{M} _{23})\)
4292     *
4293     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
4294     * with #invertAffineTransform and then put in the formula above instead of M. The function cannot
4295     * operate in-place.
4296     *
4297     * @param src input image.
4298     * @param dst output image that has the size dsize and the same type as src .
4299     * @param M \(2\times 3\) transformation matrix.
4300     * @param dsize size of the output image.
4305     *
4306     * SEE:  warpPerspective, resize, remap, getRectSubPix, transform
4307     */
4308    public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize) {
4309        warpAffine_3(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
4310    }
4311
4312
4313    //
4314    // C++:  void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
4315    //
4316
4317    /**
4318     * Applies a perspective transformation to an image.
4319     *
4320     * The function warpPerspective transforms the source image using the specified matrix:
4321     *
4322     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4323     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4324     *
4325     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4326     * and then put in the formula above instead of M. The function cannot operate in-place.
4327     *
4328     * @param src input image.
4329     * @param dst output image that has the size dsize and the same type as src .
4330     * @param M \(3\times 3\) transformation matrix.
4331     * @param dsize size of the output image.
4332     * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
4333     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4334     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4335     * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
4336     * @param borderValue value used in case of a constant border; by default, it equals 0.
4337     *
4338     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4339     */
4340    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) {
4341        warpPerspective_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
4342    }
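
    /*
     * Editor's sketch (not generated code): warping a quadrilateral to a full
     * rectangle, with hypothetical corner coordinates and a Mat "srcImage".
     *
     *     MatOfPoint2f srcQuad = new MatOfPoint2f(new Point(56, 65), new Point(368, 52),
     *             new Point(28, 387), new Point(389, 390));
     *     MatOfPoint2f dstQuad = new MatOfPoint2f(new Point(0, 0), new Point(419, 0),
     *             new Point(0, 419), new Point(419, 419));
     *     Mat H = Imgproc.getPerspectiveTransform(srcQuad, dstQuad);
     *     Mat warped = new Mat();
     *     Imgproc.warpPerspective(srcImage, warped, H, new Size(420, 420));
     */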
4343
4344    /**
4345     * Applies a perspective transformation to an image.
4346     *
4347     * The function warpPerspective transforms the source image using the specified matrix:
4348     *
4349     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4350     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4351     *
4352     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4353     * and then put in the formula above instead of M. The function cannot operate in-place.
4354     *
4355     * @param src input image.
4356     * @param dst output image that has the size dsize and the same type as src .
4357     * @param M \(3\times 3\) transformation matrix.
4358     * @param dsize size of the output image.
4359     * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
4360     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4361     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4362     * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
4363     *
4364     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4365     */
4366    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode) {
4367        warpPerspective_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode);
4368    }
4369
4370    /**
4371     * Applies a perspective transformation to an image.
4372     *
4373     * The function warpPerspective transforms the source image using the specified matrix:
4374     *
4375     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4376     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4377     *
4378     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4379     * and then put in the formula above instead of M. The function cannot operate in-place.
4380     *
4381     * @param src input image.
4382     * @param dst output image that has the size dsize and the same type as src .
4383     * @param M \(3\times 3\) transformation matrix.
4384     * @param dsize size of the output image.
4385     * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
4386     * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
4387     * \(\texttt{dst}\rightarrow\texttt{src}\) ).
4388     *
4389     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4390     */
4391    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags) {
4392        warpPerspective_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
4393    }
4394
4395    /**
4396     * Applies a perspective transformation to an image.
4397     *
4398     * The function warpPerspective transforms the source image using the specified matrix:
4399     *
4400     * \(\texttt{dst} (x,y) =  \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
4401     *      \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\)
4402     *
4403     * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
4404     * and then put in the formula above instead of M. The function cannot operate in-place.
4405     *
4406     * @param src input image.
4407     * @param dst output image that has the size dsize and the same type as src .
4408     * @param M \(3\times 3\) transformation matrix.
4409     * @param dsize size of the output image.
4412     *
4413     * SEE:  warpAffine, resize, remap, getRectSubPix, perspectiveTransform
4414     */
4415    public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize) {
4416        warpPerspective_3(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
4417    }
4418
4419
4420    //
4421    // C++:  void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
4422    //
4423
4424    /**
4425     * Applies a generic geometrical transformation to an image.
4426     *
4427     * The function remap transforms the source image using the specified map:
4428     *
4429     * \(\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\)
4430     *
4431     * where values of pixels with non-integer coordinates are computed using one of available
4432     * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps
4433     * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in
4434     * \(map_1\), or fixed-point maps created by using #convertMaps. The reason you might want to
4435     * convert from floating to fixed-point representations of a map is that they can yield much faster
     * (~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x),
4437     * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients.
4438     *
4439     * This function cannot operate in-place.
4440     *
4441     * @param src Source image.
4442     * @param dst Destination image. It has the same size as map1 and the same type as src .
4443     * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
4444     * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
4445     * representation to fixed-point for speed.
4446     * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
4447     * if map1 is (x,y) points), respectively.
4448     * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
4449     * and #INTER_LINEAR_EXACT are not supported by this function.
4450     * @param borderMode Pixel extrapolation method (see #BorderTypes). When
4451     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
     * correspond to the "outliers" in the source image are not modified by the function.
4453     * @param borderValue Value used in case of a constant border. By default, it is 0.
4454     * <b>Note:</b>
     * Due to current implementation limitations, the sizes of the input and output images must be less than 32767x32767.
4456     */
4457    public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) {
4458        remap_0(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
4459    }
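
    /*
     * Editor's sketch (not generated code): a horizontal flip expressed as
     * floating-point maps, assuming a Mat "srcImage" and org.opencv.core.CvType.
     *
     *     Mat mapX = new Mat(srcImage.size(), CvType.CV_32FC1);
     *     Mat mapY = new Mat(srcImage.size(), CvType.CV_32FC1);
     *     for (int y = 0; y < srcImage.rows(); y++) {
     *         for (int x = 0; x < srcImage.cols(); x++) {
     *             mapX.put(y, x, srcImage.cols() - 1.0 - x);  // read from the mirrored column
     *             mapY.put(y, x, (double) y);                 // keep the row
     *         }
     *     }
     *     Mat flipped = new Mat();
     *     Imgproc.remap(srcImage, flipped, mapX, mapY, Imgproc.INTER_LINEAR);
     */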
4460
4461    /**
4462     * Applies a generic geometrical transformation to an image.
4463     *
4464     * The function remap transforms the source image using the specified map:
4465     *
4466     * \(\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\)
4467     *
4468     * where values of pixels with non-integer coordinates are computed using one of available
4469     * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps
4470     * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in
4471     * \(map_1\), or fixed-point maps created by using #convertMaps. The reason you might want to
4472     * convert from floating to fixed-point representations of a map is that they can yield much faster
     * (~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x),
4474     * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients.
4475     *
4476     * This function cannot operate in-place.
4477     *
4478     * @param src Source image.
4479     * @param dst Destination image. It has the same size as map1 and the same type as src .
4480     * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
4481     * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
4482     * representation to fixed-point for speed.
4483     * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
4484     * if map1 is (x,y) points), respectively.
4485     * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
4486     * and #INTER_LINEAR_EXACT are not supported by this function.
4487     * @param borderMode Pixel extrapolation method (see #BorderTypes). When
4488     * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that
     * correspond to the "outliers" in the source image are not modified by the function.
4490     * <b>Note:</b>
     * Due to current implementation limitations, the sizes of the input and output images must be less than 32767x32767.
4492     */
4493    public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode) {
4494        remap_1(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode);
4495    }
4496
4497    /**
4498     * Applies a generic geometrical transformation to an image.
4499     *
4500     * The function remap transforms the source image using the specified map:
4501     *
4502     * \(\texttt{dst} (x,y) =  \texttt{src} (map_x(x,y),map_y(x,y))\)
4503     *
4504     * where values of pixels with non-integer coordinates are computed using one of available
4505     * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps
4506     * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in
4507     * \(map_1\), or fixed-point maps created by using #convertMaps. The reason you might want to
4508     * convert from floating to fixed-point representations of a map is that they can yield much faster
     * (~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x),
4510     * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients.
4511     *
4512     * This function cannot operate in-place.
4513     *
4514     * @param src Source image.
4515     * @param dst Destination image. It has the same size as map1 and the same type as src .
4516     * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,
4517     * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point
4518     * representation to fixed-point for speed.
4519     * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
4520     * if map1 is (x,y) points), respectively.
4521     * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA
4522     * and #INTER_LINEAR_EXACT are not supported by this function.
4525     * <b>Note:</b>
     * Due to current implementation limitations, the sizes of the input and output images must be less than 32767x32767.
4527     */
4528    public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation) {
4529        remap_2(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation);
4530    }
4531
4532
4533    //
4534    // C++:  void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
4535    //
4536
4537    /**
4538     * Converts image transformation maps from one representation to another.
4539     *
4540     * The function converts a pair of maps for remap from one representation to another. The following
4541     * options ( (map1.type(), map2.type()) \(\rightarrow\) (dstmap1.type(), dstmap2.type()) ) are
4542     * supported:
4543     *
4544     * <ul>
4545     *   <li>
4546     *  \(\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). This is the
4547     * most frequently used conversion operation, in which the original floating-point maps (see #remap)
4548     * are converted to a more compact and much faster fixed-point representation. The first output array
4549     * contains the rounded coordinates and the second array (created only when nninterpolation=false )
4550     * contains indices in the interpolation tables.
4551     *   </li>
4552     * </ul>
4553     *
4554     * <ul>
4555     *   <li>
4556     *  \(\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). The same as above but
4557     * the original maps are stored in one 2-channel matrix.
4558     *   </li>
4559     * </ul>
4560     *
4561     * <ul>
4562     *   <li>
4563     *  Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same
4564     * as the originals.
4565     *   </li>
4566     * </ul>
4567     *
4568     * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
4569     * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),
4570     * respectively.
4571     * @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
4572     * @param dstmap2 The second output map.
4573     * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or
4574     * CV_32FC2 .
4575     * @param nninterpolation Flag indicating whether the fixed-point maps are used for the
4576     * nearest-neighbor or for a more complex interpolation.
4577     *
4578     * SEE:  remap, undistort, initUndistortRectifyMap
4579     */
4580    public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type, boolean nninterpolation) {
4581        convertMaps_0(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type, nninterpolation);
4582    }
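
    /*
     * Editor's sketch (not generated code): converting the floating-point maps
     * from the remap sketch above to the compact fixed-point form before
     * repeated remapping. "mapX"/"mapY" are the hypothetical CV_32FC1 maps.
     *
     *     Mat fixed1 = new Mat(), fixed2 = new Mat();
     *     Imgproc.convertMaps(mapX, mapY, fixed1, fixed2, CvType.CV_16SC2, false);
     *     Imgproc.remap(srcImage, flipped, fixed1, fixed2, Imgproc.INTER_LINEAR);
     */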
4583
4584    /**
4585     * Converts image transformation maps from one representation to another.
4586     *
4587     * The function converts a pair of maps for remap from one representation to another. The following
4588     * options ( (map1.type(), map2.type()) \(\rightarrow\) (dstmap1.type(), dstmap2.type()) ) are
4589     * supported:
4590     *
4591     * <ul>
4592     *   <li>
4593     *  \(\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). This is the
4594     * most frequently used conversion operation, in which the original floating-point maps (see #remap)
4595     * are converted to a more compact and much faster fixed-point representation. The first output array
4596     * contains the rounded coordinates and the second array (created only when nninterpolation=false )
4597     * contains indices in the interpolation tables.
4598     *   </li>
4599     * </ul>
4600     *
4601     * <ul>
4602     *   <li>
4603     *  \(\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). The same as above but
4604     * the original maps are stored in one 2-channel matrix.
4605     *   </li>
4606     * </ul>
4607     *
4608     * <ul>
4609     *   <li>
4610     *  Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same
4611     * as the originals.
4612     *   </li>
4613     * </ul>
4614     *
4615     * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .
4616     * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),
4617     * respectively.
4618     * @param dstmap1 The first output map that has the type dstmap1type and the same size as src .
4619     * @param dstmap2 The second output map.
4620     * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or
4621     * CV_32FC2 .
4623     *
4624     * SEE:  remap, undistort, initUndistortRectifyMap
4625     */
4626    public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type) {
4627        convertMaps_1(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type);
4628    }
4629
4630
4631    //
4632    // C++:  Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale)
4633    //
4634
4635    /**
4636     * Calculates an affine matrix of 2D rotation.
4637     *
4638     * The function calculates the following matrix:
4639     *
4640     * \(\begin{bmatrix} \alpha &amp;  \beta &amp; (1- \alpha )  \cdot \texttt{center.x} -  \beta \cdot \texttt{center.y} \\ - \beta &amp;  \alpha &amp;  \beta \cdot \texttt{center.x} + (1- \alpha )  \cdot \texttt{center.y} \end{bmatrix}\)
4641     *
4642     * where
4643     *
4644     * \(\begin{array}{l} \alpha =  \texttt{scale} \cdot \cos \texttt{angle} , \\ \beta =  \texttt{scale} \cdot \sin \texttt{angle} \end{array}\)
4645     *
4646     * The transformation maps the rotation center to itself. If this is not the target, adjust the shift.
4647     *
4648     * @param center Center of the rotation in the source image.
4649     * @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the
4650     * coordinate origin is assumed to be the top-left corner).
4651     * @param scale Isotropic scale factor.
4652     *
4653     * SEE:  getAffineTransform, warpAffine, transform
4654     * @return automatically generated
4655     */
4656    public static Mat getRotationMatrix2D(Point center, double angle, double scale) {
4657        return new Mat(getRotationMatrix2D_0(center.x, center.y, angle, scale));
4658    }
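
    /*
     * Editor's sketch (not generated code): rotating an image 30 degrees
     * counter-clockwise about its center, assuming a Mat "srcImage".
     *
     *     Point center = new Point(srcImage.cols() / 2.0, srcImage.rows() / 2.0);
     *     Mat R = Imgproc.getRotationMatrix2D(center, 30.0, 1.0);
     *     Mat rotated = new Mat();
     *     Imgproc.warpAffine(srcImage, rotated, R, srcImage.size());
     */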
4659
4660
4661    //
4662    // C++:  void cv::invertAffineTransform(Mat M, Mat& iM)
4663    //
4664
4665    /**
4666     * Inverts an affine transformation.
4667     *
4668     * The function computes an inverse affine transformation represented by \(2 \times 3\) matrix M:
4669     *
4670     * \(\begin{bmatrix} a_{11} &amp; a_{12} &amp; b_1  \\ a_{21} &amp; a_{22} &amp; b_2 \end{bmatrix}\)
4671     *
4672     * The result is also a \(2 \times 3\) matrix of the same type as M.
4673     *
4674     * @param M Original affine transformation.
4675     * @param iM Output reverse affine transformation.
4676     */
4677    public static void invertAffineTransform(Mat M, Mat iM) {
4678        invertAffineTransform_0(M.nativeObj, iM.nativeObj);
4679    }
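
    /*
     * Editor's sketch (not generated code): undoing the rotation from the
     * sketch above, either by inverting "R" explicitly or by passing
     * WARP_INVERSE_MAP.
     *
     *     Mat Rinv = new Mat();
     *     Imgproc.invertAffineTransform(R, Rinv);
     *     Mat restored = new Mat();
     *     Imgproc.warpAffine(rotated, restored, Rinv, rotated.size());
     *     // Equivalent: warpAffine(rotated, restored, R, rotated.size(),
     *     //             Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP);
     */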
4680
4681
4682    //
4683    // C++:  Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU)
4684    //
4685
4686    /**
4687     * Calculates a perspective transform from four pairs of the corresponding points.
4688     *
4689     * The function calculates the \(3 \times 3\) matrix of a perspective transform so that:
4690     *
4691     * \(\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\)
4692     *
4693     * where
4694     *
4695     * \(dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\)
4696     *
4697     * @param src Coordinates of quadrangle vertices in the source image.
4698     * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
4699     * @param solveMethod method passed to cv::solve (#DecompTypes)
4700     *
4701     * SEE:  findHomography, warpPerspective, perspectiveTransform
4702     * @return automatically generated
4703     */
4704    public static Mat getPerspectiveTransform(Mat src, Mat dst, int solveMethod) {
4705        return new Mat(getPerspectiveTransform_0(src.nativeObj, dst.nativeObj, solveMethod));
4706    }
4707
4708    /**
4709     * Calculates a perspective transform from four pairs of the corresponding points.
4710     *
4711     * The function calculates the \(3 \times 3\) matrix of a perspective transform so that:
4712     *
4713     * \(\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\)
4714     *
4715     * where
4716     *
4717     * \(dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\)
4718     *
4719     * @param src Coordinates of quadrangle vertices in the source image.
4720     * @param dst Coordinates of the corresponding quadrangle vertices in the destination image.
4721     *
4722     * SEE:  findHomography, warpPerspective, perspectiveTransform
4723     * @return automatically generated
4724     */
4725    public static Mat getPerspectiveTransform(Mat src, Mat dst) {
4726        return new Mat(getPerspectiveTransform_1(src.nativeObj, dst.nativeObj));
4727    }
4728
4729
4730    //
4731    // C++:  Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst)
4732    //
4733
4734    public static Mat getAffineTransform(MatOfPoint2f src, MatOfPoint2f dst) {
4735        Mat src_mat = src;
4736        Mat dst_mat = dst;
4737        return new Mat(getAffineTransform_0(src_mat.nativeObj, dst_mat.nativeObj));
4738    }
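
    /*
     * Editor's sketch (not generated code): an affine warp from three point
     * correspondences, with hypothetical coordinates and a Mat "srcImage".
     *
     *     MatOfPoint2f srcTri = new MatOfPoint2f(new Point(0, 0), new Point(100, 0), new Point(0, 100));
     *     MatOfPoint2f dstTri = new MatOfPoint2f(new Point(10, 10), new Point(110, 20), new Point(5, 115));
     *     Mat A = Imgproc.getAffineTransform(srcTri, dstTri);
     *     Mat warped = new Mat();
     *     Imgproc.warpAffine(srcImage, warped, A, srcImage.size());
     */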
4739
4740
4741    //
4742    // C++:  void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
4743    //
4744
4745    /**
4746     * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
4747     *
4748     * The function getRectSubPix extracts pixels from src:
4749     *
4750     * \(patch(x, y) = src(x +  \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y +  \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\)
4751     *
4752     * where the values of the pixels at non-integer coordinates are retrieved using bilinear
     * interpolation. Every channel of multi-channel images is processed independently. Also,
     * the image should be a single-channel or three-channel image. While the center of the
4755     * rectangle must be inside the image, parts of the rectangle may be outside.
4756     *
4757     * @param image Source image.
4758     * @param patchSize Size of the extracted patch.
4759     * @param center Floating point coordinates of the center of the extracted rectangle within the
4760     * source image. The center must be inside the image.
4761     * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
4762     * @param patchType Depth of the extracted pixels. By default, they have the same depth as src .
4763     *
4764     * SEE:  warpAffine, warpPerspective
4765     */
4766    public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch, int patchType) {
4767        getRectSubPix_0(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj, patchType);
4768    }
4769
4770    /**
4771     * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
4772     *
4773     * The function getRectSubPix extracts pixels from src:
4774     *
4775     * \(patch(x, y) = src(x +  \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y +  \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\)
4776     *
4777     * where the values of the pixels at non-integer coordinates are retrieved using bilinear
     * interpolation. Every channel of multi-channel images is processed independently. Also,
     * the image should be a single-channel or three-channel image. While the center of the
4780     * rectangle must be inside the image, parts of the rectangle may be outside.
4781     *
4782     * @param image Source image.
4783     * @param patchSize Size of the extracted patch.
4784     * @param center Floating point coordinates of the center of the extracted rectangle within the
4785     * source image. The center must be inside the image.
4786     * @param patch Extracted patch that has the size patchSize and the same number of channels as src .
4787     *
4788     * SEE:  warpAffine, warpPerspective
4789     */
4790    public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch) {
4791        getRectSubPix_1(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj);
4792    }
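
    /*
     * Editor's sketch (not generated code): extracting a 21x21 patch around a
     * sub-pixel location, assuming a Mat "srcImage".
     *
     *     Mat patch = new Mat();
     *     Imgproc.getRectSubPix(srcImage, new Size(21, 21), new Point(123.4, 56.7), patch);
     */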
4793
4794
4795    //
4796    // C++:  void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags)
4797    //
4798
4799    /**
4800     * Remaps an image to semilog-polar coordinates space.
4801     *
     * @deprecated This function produces the same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG);
4803     *
4804     *
4805     * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image d)"):
4806     * \(\begin{array}{l}
4807     *   dst( \rho , \phi ) = src(x,y) \\
4808     *   dst.size() \leftarrow src.size()
4809     * \end{array}\)
4810     *
4811     * where
4812     * \(\begin{array}{l}
4813     *   I = (dx,dy) = (x - center.x,y - center.y) \\
4814     *   \rho = M \cdot log_e(\texttt{magnitude} (I)) ,\\
4815     *   \phi = Kangle \cdot \texttt{angle} (I) \\
4816     * \end{array}\)
4817     *
4818     * and
4819     * \(\begin{array}{l}
4820     *   M = src.cols / log_e(maxRadius) \\
4821     *   Kangle = src.rows / 2\Pi \\
4822     * \end{array}\)
4823     *
4824     * The function emulates the human "foveal" vision and can be used for fast scale and
4825     * rotation-invariant template matching, for object tracking and so forth.
4826     * @param src Source image
4827     * @param dst Destination image. It will have same size and type as src.
4828     * @param center The transformation center; where the output precision is maximal
     * @param M Magnitude scale parameter. It also determines the radius of the bounding circle to transform.
4830     * @param flags A combination of interpolation methods, see #InterpolationFlags
4831     *
4832     * <b>Note:</b>
4833     * <ul>
4834     *   <li>
     *    The function cannot operate in-place.
4836     *   </li>
4837     *   <li>
     *    To calculate magnitude and angle in degrees, #cartToPolar is used internally, thus angles are measured from 0 to 360 degrees with an accuracy of about 0.3 degrees.
4839     *   </li>
4840     * </ul>
4841     *
4842     * SEE: cv::linearPolar
4843     */
4844    @Deprecated
4845    public static void logPolar(Mat src, Mat dst, Point center, double M, int flags) {
4846        logPolar_0(src.nativeObj, dst.nativeObj, center.x, center.y, M, flags);
4847    }
4848
4849
4850    //
4851    // C++:  void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags)
4852    //
4853
4854    /**
4855     * Remaps an image to polar coordinates space.
4856     *
     * @deprecated This function produces the same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags)
4858     *
4859     *
4860     * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image c)"):
4861     * \(\begin{array}{l}
4862     *   dst( \rho , \phi ) = src(x,y) \\
4863     *   dst.size() \leftarrow src.size()
4864     * \end{array}\)
4865     *
4866     * where
4867     * \(\begin{array}{l}
4868     *   I = (dx,dy) = (x - center.x,y - center.y) \\
4869     *   \rho = Kmag \cdot \texttt{magnitude} (I) ,\\
     *   \phi = Kangle \cdot \texttt{angle} (I)
4871     * \end{array}\)
4872     *
4873     * and
4874     * \(\begin{array}{l}
     *   Kmag = src.cols / maxRadius \\
     *   Kangle = src.rows / 2\Pi
4877     * \end{array}\)
4878     *
4879     *
4880     * @param src Source image
4881     * @param dst Destination image. It will have same size and type as src.
     * @param center The transformation center.
4883     * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.
4884     * @param flags A combination of interpolation methods, see #InterpolationFlags
4885     *
4886     * <b>Note:</b>
4887     * <ul>
4888     *   <li>
     *    The function cannot operate in-place.
4890     *   </li>
4891     *   <li>
     *    To calculate magnitude and angle in degrees, #cartToPolar is used internally, thus angles are measured from 0 to 360 degrees with an accuracy of about 0.3 degrees.
4893     *   </li>
4894     * </ul>
4895     *
4896     * SEE: cv::logPolar
4897     */
4898    @Deprecated
4899    public static void linearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags) {
4900        linearPolar_0(src.nativeObj, dst.nativeObj, center.x, center.y, maxRadius, flags);
4901    }
4902
4903
4904    //
4905    // C++:  void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags)
4906    //
4907
4908    /**
4909     * Remaps an image to polar or semilog-polar coordinates space
4910     *
4911     *  polar_remaps_reference_image
4912     * ![Polar remaps reference](pics/polar_remap_doc.png)
4913     *
4914     * Transform the source image using the following transformation:
4915     * \(
4916     * dst(\rho , \phi ) = src(x,y)
4917     * \)
4918     *
4919     * where
4920     * \(
4921     * \begin{array}{l}
4922     * \vec{I} = (x - center.x, \;y - center.y) \\
4923     * \phi = Kangle \cdot \texttt{angle} (\vec{I}) \\
4924     * \rho = \left\{\begin{matrix}
4925     * Klin \cdot \texttt{magnitude} (\vec{I}) &amp; default \\
4926     * Klog \cdot log_e(\texttt{magnitude} (\vec{I})) &amp; if \; semilog \\
4927     * \end{matrix}\right.
4928     * \end{array}
4929     * \)
4930     *
4931     * and
4932     * \(
4933     * \begin{array}{l}
4934     * Kangle = dsize.height / 2\Pi \\
4935     * Klin = dsize.width / maxRadius \\
4936     * Klog = dsize.width / log_e(maxRadius) \\
4937     * \end{array}
4938     * \)
4939     *
4940     *
4941     * \par Linear vs semilog mapping
4942     *
4943     * Polar mapping can be linear or semi-log. Add one of #WarpPolarMode to {@code flags} to specify the polar mapping mode.
4944     *
4945     * Linear is the default mode.
4946     *
     * The semilog mapping emulates the human "foveal" vision that permits very high acuity along the line of sight (central vision),
     * in contrast to peripheral vision, where acuity is lower.
4949     *
4950     * \par Option on {@code dsize}:
4951     *
4952     * <ul>
4953     *   <li>
4954     *  if both values in {@code dsize &lt;=0 } (default),
4955     * the destination image will have (almost) same area of source bounding circle:
4956     * \(\begin{array}{l}
4957     * dsize.area  \leftarrow (maxRadius^2 \cdot \Pi) \\
4958     * dsize.width = \texttt{cvRound}(maxRadius) \\
4959     * dsize.height = \texttt{cvRound}(maxRadius \cdot \Pi) \\
4960     * \end{array}\)
4961     *   </li>
4962     * </ul>
4963     *
4964     *
4965     * <ul>
4966     *   <li>
4967     *  if only {@code dsize.height &lt;= 0},
     * the destination image area will be proportional to the bounding circle area but scaled by {@code Klin * Klin}:
4969     * \(\begin{array}{l}
4970     * dsize.height = \texttt{cvRound}(dsize.width \cdot \Pi) \\
4971     * \end{array}
4972     * \)
4973     *   </li>
4974     * </ul>
4975     *
4976     * <ul>
4977     *   <li>
4978     *  if both values in {@code dsize &gt; 0 },
4979     * the destination image will have the given size therefore the area of the bounding circle will be scaled to {@code dsize}.
4980     *   </li>
4981     * </ul>
4982     *
4983     *
4984     * \par Reverse mapping
4985     *
     * You can get reverse mapping by adding #WARP_INVERSE_MAP to {@code flags}
4987     * \snippet polar_transforms.cpp InverseMap
4988     *
     * In addition, to calculate the original coordinate from a polar-mapped coordinate \((\rho, \phi) \rightarrow (x, y)\):
4990     * \snippet polar_transforms.cpp InverseCoordinate
4991     *
4992     * @param src Source image.
4993     * @param dst Destination image. It will have same type as src.
4994     * @param dsize The destination image size (see description for valid options).
4995     * @param center The transformation center.
4996     * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.
4997     * @param flags A combination of interpolation methods, #InterpolationFlags + #WarpPolarMode.
4998     * <ul>
4999     *   <li>
5000     *              Add #WARP_POLAR_LINEAR to select linear polar mapping (default)
5001     *   </li>
5002     *   <li>
5003     *              Add #WARP_POLAR_LOG to select semilog polar mapping
5004     *   </li>
5005     *   <li>
5006     *              Add #WARP_INVERSE_MAP for reverse mapping.
5007     *   </li>
5008     * </ul>
5009     * <b>Note:</b>
5010     * <ul>
5011     *   <li>
     *   The function cannot operate in-place.
5013     *   </li>
5014     *   <li>
     *   To calculate magnitude and angle in degrees, #cartToPolar is used internally, thus angles are measured from 0 to 360 degrees with an accuracy of about 0.3 degrees.
5016     *   </li>
5017     *   <li>
     *   This function uses #remap. Due to current implementation limitations, the sizes of the input and output images must be less than 32767x32767.
5019     *   </li>
5020     * </ul>
5021     *
5022     * SEE: cv::remap
5023     */
5024    public static void warpPolar(Mat src, Mat dst, Size dsize, Point center, double maxRadius, int flags) {
5025        warpPolar_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, center.x, center.y, maxRadius, flags);
5026    }
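
    /*
     * Editor's sketch (not generated code): unwrapping an image around its
     * center into linear polar coordinates, assuming a Mat "srcImage".
     *
     *     Point center = new Point(srcImage.cols() / 2.0, srcImage.rows() / 2.0);
     *     double maxRadius = Math.min(center.x, center.y);
     *     Mat polar = new Mat();
     *     // An empty dsize lets the function size the output from maxRadius.
     *     Imgproc.warpPolar(srcImage, polar, new Size(), center, maxRadius,
     *             Imgproc.INTER_LINEAR + Imgproc.WARP_POLAR_LINEAR);
     */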
5027
5028
5029    //
5030    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1)
5031    //
5032
5033    /**
5034     * Calculates the integral of an image.
5035     *
5036     * The function calculates one or more integral images for the source image as follows:
5037     *
5038     * \(\texttt{sum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)\)
5039     *
5040     * \(\texttt{sqsum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)^2\)
5041     *
5042     * \(\texttt{tilted} (X,Y) =  \sum _{y&lt;Y,abs(x-X+1) \leq Y-y-1}  \texttt{image} (x,y)\)
5043     *
5044     * Using these integral images, you can calculate sum, mean, and standard deviation over a specific
5045     * up-right or rotated rectangular region of the image in a constant time, for example:
5046     *
5047     * \(\sum _{x_1 \leq x &lt; x_2,  \, y_1  \leq y &lt; y_2}  \texttt{image} (x,y) =  \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\)
5048     *
     * This makes it possible to do fast blurring or fast block correlation with a variable window
     * size, for example. In case of multi-channel images, sums for each channel are accumulated independently.
5051     *
5052     * As a practical example, the next figure shows the calculation of the integral of a straight
5053     * rectangle Rect(4,4,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the
5054     * original image are shown, as well as the relative pixels in the integral images sum and tilted .
5055     *
5056     * ![integral calculation example](pics/integral.png)
5057     *
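     * A minimal sketch of the constant-time box sum this enables (here {@code gray} is an
     * assumed 8-bit single-channel Mat and the rectangle bounds are illustrative):
     * <code>
     *     Mat sum = new Mat();
     *     Imgproc.integral(gray, sum);
     *     // Sum over the rectangle x1 &lt;= x &lt; x2, y1 &lt;= y &lt; y2 with four lookups:
     *     int x1 = 4, y1 = 4, x2 = 7, y2 = 6;
     *     double boxSum = sum.get(y2, x2)[0] - sum.get(y1, x2)[0]
     *                   - sum.get(y2, x1)[0] + sum.get(y1, x1)[0];
     * </code>
     *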
     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
     * @param sum integral image as \((W+1)\times (H+1)\), 32-bit integer or floating-point (32f or 64f).
     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
     * floating-point (64f) array.
     * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with
     * the same data type as sum.
     * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
     * CV_64F.
     * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
     */
    public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth, int sqdepth) {
        integral3_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth, sqdepth);
    }

    /**
     * Calculates the integral of an image.
     *
     * The function calculates one or more integral images for the source image as follows:
     *
     * \(\texttt{sum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)\)
     *
     * \(\texttt{sqsum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)^2\)
     *
     * \(\texttt{tilted} (X,Y) =  \sum _{y&lt;Y,abs(x-X+1) \leq Y-y-1}  \texttt{image} (x,y)\)
     *
     * Using these integral images, you can calculate the sum, mean, and standard deviation over a specific
     * up-right or rotated rectangular region of the image in constant time, for example:
     *
     * \(\sum _{x_1 \leq x &lt; x_2,  \, y_1  \leq y &lt; y_2}  \texttt{image} (x,y) =  \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\)
     *
     * This makes it possible to do fast blurring or fast block correlation with a variable window size, for
     * example. In the case of multi-channel images, sums for each channel are accumulated independently.
     *
     * As a practical example, the next figure shows the calculation of the integral of a straight
     * rectangle Rect(4,4,3,2) and of a tilted rectangle Rect(5,1,2,3). The selected pixels in the
     * original image are shown, as well as the corresponding pixels in the integral images sum and tilted.
     *
     * ![integral calculation example](pics/integral.png)
     *
     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
     * @param sum integral image as \((W+1)\times (H+1)\), 32-bit integer or floating-point (32f or 64f).
     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
     * floating-point (64f) array.
     * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with
     * the same data type as sum.
     * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or
     * CV_64F.
     */
    public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth) {
        integral3_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth);
    }

    /**
     * Calculates the integral of an image.
     *
     * The function calculates one or more integral images for the source image as follows:
     *
     * \(\texttt{sum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)\)
     *
     * \(\texttt{sqsum} (X,Y) =  \sum _{x&lt;X,y&lt;Y}  \texttt{image} (x,y)^2\)
     *
     * \(\texttt{tilted} (X,Y) =  \sum _{y&lt;Y,abs(x-X+1) \leq Y-y-1}  \texttt{image} (x,y)\)
     *
     * Using these integral images, you can calculate the sum, mean, and standard deviation over a specific
     * up-right or rotated rectangular region of the image in constant time, for example:
     *
     * \(\sum _{x_1 \leq x &lt; x_2,  \, y_1  \leq y &lt; y_2}  \texttt{image} (x,y) =  \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\)
     *
     * This makes it possible to do fast blurring or fast block correlation with a variable window size, for
     * example. In the case of multi-channel images, sums for each channel are accumulated independently.
     *
     * As a practical example, the next figure shows the calculation of the integral of a straight
     * rectangle Rect(4,4,3,2) and of a tilted rectangle Rect(5,1,2,3). The selected pixels in the
     * original image are shown, as well as the corresponding pixels in the integral images sum and tilted.
     *
     * ![integral calculation example](pics/integral.png)
     *
     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
     * @param sum integral image as \((W+1)\times (H+1)\), 32-bit integer or floating-point (32f or 64f).
     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
     * floating-point (64f) array.
     * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with
     * the same data type as sum.
     */
    public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted) {
        integral3_2(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj);
    }


    //
    // C++:  void cv::integral(Mat src, Mat& sum, int sdepth = -1)
    //

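    /**
     * Calculates the integral image {@code sum} of {@code src}; see the three-output
     * {@code integral3} variant above for the definition of the sum image.
     *
     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
     * @param sum integral image as \((W+1)\times (H+1)\), 32-bit integer or floating-point (32f or 64f).
     * @param sdepth desired depth of the integral image, CV_32S, CV_32F, or CV_64F.
     */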
    public static void integral(Mat src, Mat sum, int sdepth) {
        integral_0(src.nativeObj, sum.nativeObj, sdepth);
    }

    public static void integral(Mat src, Mat sum) {
        integral_1(src.nativeObj, sum.nativeObj);
    }


    //
    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1)
    //

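    /**
     * Calculates the integral image {@code sum} and the integral {@code sqsum} of squared pixel
     * values; see the three-output {@code integral3} variant above for the definitions.
     *
     * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f).
     * @param sum integral image as \((W+1)\times (H+1)\), 32-bit integer or floating-point (32f or 64f).
     * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision
     * floating-point (64f) array.
     * @param sdepth desired depth of the integral image, CV_32S, CV_32F, or CV_64F.
     * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
     */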
    public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth, int sqdepth) {
        integral2_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth, sqdepth);
    }

    public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth) {
        integral2_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth);
    }

    public static void integral2(Mat src, Mat sum, Mat sqsum) {
        integral2_2(src.nativeObj, sum.nativeObj, sqsum.nativeObj);
    }


    //
    // C++:  void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat())
    //

    /**
     * Adds an image to the accumulator image.
     *
     * The function adds src or some of its elements to dst:
     *
     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * The function cv::accumulate can be used, for example, to collect statistics of a scene background
     * viewed by a still camera and for further foreground-background segmentation.
     *
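     * A minimal sketch of building a mean background over {@code n} frames (here {@code frame}
     * is an assumed 8-bit 3-channel Mat and the CV_32F accumulator is an illustrative choice):
     * <code>
     *     Mat acc = Mat.zeros(frame.size(), CvType.CV_32FC3);
     *     int n = 100;                                 // number of frames (illustrative)
     *     // for each of the n frames:
     *     Imgproc.accumulate(frame, acc);
     *     Mat mean = new Mat();
     *     Core.divide(acc, new Scalar(n, n, n), mean); // per-pixel mean background
     * </code>
     *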
     * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.
     * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.
     * @param mask Optional operation mask.
     *
     * SEE:  accumulateSquare, accumulateProduct, accumulateWeighted
     */
    public static void accumulate(Mat src, Mat dst, Mat mask) {
        accumulate_0(src.nativeObj, dst.nativeObj, mask.nativeObj);
    }

    /**
     * Adds an image to the accumulator image.
     *
     * The function adds src or some of its elements to dst:
     *
     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * The function cv::accumulate can be used, for example, to collect statistics of a scene background
     * viewed by a still camera and for further foreground-background segmentation.
     *
     * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.
     * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.
     *
     * SEE:  accumulateSquare, accumulateProduct, accumulateWeighted
     */
    public static void accumulate(Mat src, Mat dst) {
        accumulate_1(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
    //

    /**
     * Adds the square of a source image to the accumulator image.
     *
     * The function adds the input image src or its selected region, raised to a power of 2, to the
     * accumulator dst:
     *
     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)^2  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
     * floating-point.
     * @param mask Optional operation mask.
     *
     * SEE:  accumulate, accumulateProduct, accumulateWeighted
     */
    public static void accumulateSquare(Mat src, Mat dst, Mat mask) {
        accumulateSquare_0(src.nativeObj, dst.nativeObj, mask.nativeObj);
    }

    /**
     * Adds the square of a source image to the accumulator image.
     *
     * The function adds the input image src or its selected region, raised to a power of 2, to the
     * accumulator dst:
     *
     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src} (x,y)^2  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
     * floating-point.
     *
     * SEE:  accumulate, accumulateProduct, accumulateWeighted
     */
    public static void accumulateSquare(Mat src, Mat dst) {
        accumulateSquare_1(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
    //

    /**
     * Adds the per-element product of two input images to the accumulator image.
     *
     * The function adds the product of two images or their selected regions to the accumulator dst:
     *
     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src1} (x,y)  \cdot \texttt{src2} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
     * @param src2 Second input image of the same type and the same size as src1.
     * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit
     * floating-point.
     * @param mask Optional operation mask.
     *
     * SEE:  accumulate, accumulateSquare, accumulateWeighted
     */
    public static void accumulateProduct(Mat src1, Mat src2, Mat dst, Mat mask) {
        accumulateProduct_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj);
    }

    /**
     * Adds the per-element product of two input images to the accumulator image.
     *
     * The function adds the product of two images or their selected regions to the accumulator dst:
     *
     * \(\texttt{dst} (x,y)  \leftarrow \texttt{dst} (x,y) +  \texttt{src1} (x,y)  \cdot \texttt{src2} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.
     * @param src2 Second input image of the same type and the same size as src1.
     * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit
     * floating-point.
     *
     * SEE:  accumulate, accumulateSquare, accumulateWeighted
     */
    public static void accumulateProduct(Mat src1, Mat src2, Mat dst) {
        accumulateProduct_1(src1.nativeObj, src2.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat())
    //

    /**
     * Updates a running average.
     *
     * The function calculates the weighted sum of the input image src and the accumulator dst so that dst
     * becomes a running average of a frame sequence:
     *
     * \(\texttt{dst} (x,y)  \leftarrow (1- \texttt{alpha} )  \cdot \texttt{dst} (x,y) +  \texttt{alpha} \cdot \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images).
     * The function supports multi-channel images. Each channel is processed independently.
     *
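     * A minimal sketch of an exponentially weighted background model (here the frame source and
     * the alpha value are illustrative assumptions):
     * <code>
     *     Mat acc = new Mat();
     *     frame.convertTo(acc, CvType.CV_32F);          // seed with the first frame
     *     // for each subsequent frame:
     *     Imgproc.accumulateWeighted(frame, acc, 0.05); // small alpha = slow forgetting
     *     Mat background = new Mat();
     *     acc.convertTo(background, CvType.CV_8U);
     * </code>
     *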
     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
     * floating-point.
     * @param alpha Weight of the input image.
     * @param mask Optional operation mask.
     *
     * SEE:  accumulate, accumulateSquare, accumulateProduct
     */
    public static void accumulateWeighted(Mat src, Mat dst, double alpha, Mat mask) {
        accumulateWeighted_0(src.nativeObj, dst.nativeObj, alpha, mask.nativeObj);
    }

    /**
     * Updates a running average.
     *
     * The function calculates the weighted sum of the input image src and the accumulator dst so that dst
     * becomes a running average of a frame sequence:
     *
     * \(\texttt{dst} (x,y)  \leftarrow (1- \texttt{alpha} )  \cdot \texttt{dst} (x,y) +  \texttt{alpha} \cdot \texttt{src} (x,y)  \quad \text{if} \quad \texttt{mask} (x,y)  \ne 0\)
     *
     * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images).
     * The function supports multi-channel images. Each channel is processed independently.
     *
     * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
     * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit
     * floating-point.
     * @param alpha Weight of the input image.
     *
     * SEE:  accumulate, accumulateSquare, accumulateProduct
     */
    public static void accumulateWeighted(Mat src, Mat dst, double alpha) {
        accumulateWeighted_1(src.nativeObj, dst.nativeObj, alpha);
    }


    //
    // C++:  Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0)
    //

    /**
     * The function is used to detect translational shifts that occur between two images.
     *
     * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
     * the frequency domain. It can be used for fast image registration as well as motion estimation. For
     * more information please see &lt;http://en.wikipedia.org/wiki/Phase_correlation&gt;
     *
     * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded to the
     * optimal size if needed, using getOptimalDFTSize.
     *
     * The function performs the following steps:
     * <ul>
     *   <li>
     *  First it applies a Hanning window (see &lt;http://en.wikipedia.org/wiki/Hann_function&gt;) to each
     * image to remove possible edge effects. This window is cached until the array size changes to speed
     * up processing time.
     *   </li>
     *   <li>
     *  Next it computes the forward DFTs of each source array:
     * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\)
     * where \(\mathcal{F}\) is the forward DFT.
     *   </li>
     *   <li>
     *  It then computes the cross-power spectrum of each frequency domain array:
     * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\)
     *   </li>
     *   <li>
     *  Next the cross-correlation is converted back into the time domain via the inverse DFT:
     * \(r = \mathcal{F}^{-1}\{R\}\)
     *   </li>
     *   <li>
     *  Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
     * achieve sub-pixel accuracy.
     * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\)
     *   </li>
     *   <li>
     *  If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
     * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
     * peak) and will be smaller when there are multiple peaks.
     *   </li>
     * </ul>
     *
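     * A minimal sketch of estimating the shift between two frames (here {@code frameA} and
     * {@code frameB} are assumed single-channel Mats; the CV_32F conversion and Hann window are
     * illustrative):
     * <code>
     *     Mat a32 = new Mat(), b32 = new Mat(), hann = new Mat();
     *     frameA.convertTo(a32, CvType.CV_32F);
     *     frameB.convertTo(b32, CvType.CV_32F);
     *     Imgproc.createHanningWindow(hann, a32.size(), CvType.CV_32F);
     *     Point shift = Imgproc.phaseCorrelate(a32, b32, hann);
     * </code>
     *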
     * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
     * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
     * @param window Floating point array with windowing coefficients to reduce edge effects (optional).
     * @param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional).
     * @return detected phase shift (sub-pixel) between the two arrays.
     *
     * SEE: dft, getOptimalDFTSize, idft, mulSpectrums, createHanningWindow
     */
    public static Point phaseCorrelate(Mat src1, Mat src2, Mat window, double[] response) {
        double[] response_out = new double[1];
        Point retVal = new Point(phaseCorrelate_0(src1.nativeObj, src2.nativeObj, window.nativeObj, response_out));
        if(response!=null) response[0] = (double)response_out[0];
        return retVal;
    }

    /**
     * The function is used to detect translational shifts that occur between two images.
     *
     * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
     * the frequency domain. It can be used for fast image registration as well as motion estimation. For
     * more information please see &lt;http://en.wikipedia.org/wiki/Phase_correlation&gt;
     *
     * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded to the
     * optimal size if needed, using getOptimalDFTSize.
     *
     * The function performs the following steps:
     * <ul>
     *   <li>
     *  First it applies a Hanning window (see &lt;http://en.wikipedia.org/wiki/Hann_function&gt;) to each
     * image to remove possible edge effects. This window is cached until the array size changes to speed
     * up processing time.
     *   </li>
     *   <li>
     *  Next it computes the forward DFTs of each source array:
     * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\)
     * where \(\mathcal{F}\) is the forward DFT.
     *   </li>
     *   <li>
     *  It then computes the cross-power spectrum of each frequency domain array:
     * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\)
     *   </li>
     *   <li>
     *  Next the cross-correlation is converted back into the time domain via the inverse DFT:
     * \(r = \mathcal{F}^{-1}\{R\}\)
     *   </li>
     *   <li>
     *  Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
     * achieve sub-pixel accuracy.
     * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\)
     *   </li>
     *   <li>
     *  If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
     * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
     * peak) and will be smaller when there are multiple peaks.
     *   </li>
     * </ul>
     *
     * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
     * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
     * @param window Floating point array with windowing coefficients to reduce edge effects (optional).
     * @return detected phase shift (sub-pixel) between the two arrays.
     *
     * SEE: dft, getOptimalDFTSize, idft, mulSpectrums, createHanningWindow
     */
    public static Point phaseCorrelate(Mat src1, Mat src2, Mat window) {
        return new Point(phaseCorrelate_1(src1.nativeObj, src2.nativeObj, window.nativeObj));
    }

    /**
     * The function is used to detect translational shifts that occur between two images.
     *
     * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in
     * the frequency domain. It can be used for fast image registration as well as motion estimation. For
     * more information please see &lt;http://en.wikipedia.org/wiki/Phase_correlation&gt;
     *
     * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded to the
     * optimal size if needed, using getOptimalDFTSize.
     *
     * The function performs the following steps:
     * <ul>
     *   <li>
     *  First it applies a Hanning window (see &lt;http://en.wikipedia.org/wiki/Hann_function&gt;) to each
     * image to remove possible edge effects. This window is cached until the array size changes to speed
     * up processing time.
     *   </li>
     *   <li>
     *  Next it computes the forward DFTs of each source array:
     * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\)
     * where \(\mathcal{F}\) is the forward DFT.
     *   </li>
     *   <li>
     *  It then computes the cross-power spectrum of each frequency domain array:
     * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\)
     *   </li>
     *   <li>
     *  Next the cross-correlation is converted back into the time domain via the inverse DFT:
     * \(r = \mathcal{F}^{-1}\{R\}\)
     *   </li>
     *   <li>
     *  Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to
     * achieve sub-pixel accuracy.
     * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\)
     *   </li>
     *   <li>
     *  If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5
     * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single
     * peak) and will be smaller when there are multiple peaks.
     *   </li>
     * </ul>
     *
     * @param src1 Source floating point array (CV_32FC1 or CV_64FC1)
     * @param src2 Source floating point array (CV_32FC1 or CV_64FC1)
     * @return detected phase shift (sub-pixel) between the two arrays.
     *
     * SEE: dft, getOptimalDFTSize, idft, mulSpectrums, createHanningWindow
     */
    public static Point phaseCorrelate(Mat src1, Mat src2) {
        return new Point(phaseCorrelate_2(src1.nativeObj, src2.nativeObj));
    }


    //
    // C++:  void cv::createHanningWindow(Mat& dst, Size winSize, int type)
    //

    /**
     * This function computes Hanning window coefficients in two dimensions.
     *
     * See (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function)
     * for more information.
     *
     * An example is shown below:
     * <code>
     *     // create a Hanning window of size 100x100 and type CV_32F
     *     Mat hann = new Mat();
     *     createHanningWindow(hann, new Size(100, 100), CvType.CV_32F);
     * </code>
     * @param dst Destination array to place Hann coefficients in.
     * @param winSize The window size specifications (both width and height must be &gt; 1).
     * @param type Created array type.
     */
    public static void createHanningWindow(Mat dst, Size winSize, int type) {
        createHanningWindow_0(dst.nativeObj, winSize.width, winSize.height, type);
    }


    //
    // C++:  void cv::divSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false)
    //

    /**
     * Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum.
     *
     * The function cv::divSpectrums performs the per-element division of the first array by the second array.
     * The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform.
     *
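     * A minimal sketch of a deconvolution-style use (here {@code img1} and {@code img2} are
     * assumed CV_32F single-channel Mats; the flag choices are illustrative):
     * <code>
     *     Mat A = new Mat(), B = new Mat(), C = new Mat(), c = new Mat();
     *     Core.dft(img1, A, Core.DFT_COMPLEX_OUTPUT);
     *     Core.dft(img2, B, Core.DFT_COMPLEX_OUTPUT);
     *     Imgproc.divSpectrums(A, B, C, 0, false);     // C = A / B, element-wise
     *     Core.idft(C, c, Core.DFT_SCALE | Core.DFT_REAL_OUTPUT);
     * </code>
     *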
     * @param a first input array.
     * @param b second input array of the same size and type as src1.
     * @param c output array of the same size and type as src1.
     * @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that
     * each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, simply pass {@code 0}.
     * @param conjB optional flag that conjugates the second input array before the multiplication (true)
     * or not (false).
     */
    public static void divSpectrums(Mat a, Mat b, Mat c, int flags, boolean conjB) {
        divSpectrums_0(a.nativeObj, b.nativeObj, c.nativeObj, flags, conjB);
    }

    /**
     * Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum.
     *
     * The function cv::divSpectrums performs the per-element division of the first array by the second array.
     * The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform.
     *
     * @param a first input array.
     * @param b second input array of the same size and type as src1.
     * @param c output array of the same size and type as src1.
     * @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that
     * each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, simply pass {@code 0}.
     */
    public static void divSpectrums(Mat a, Mat b, Mat c, int flags) {
        divSpectrums_1(a.nativeObj, b.nativeObj, c.nativeObj, flags);
    }


    //
    // C++:  double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, int type)
    //

    /**
     * Applies a fixed-level threshold to each array element.
     *
     * The function applies fixed-level thresholding to a multiple-channel array. The function is typically
     * used to get a bi-level (binary) image out of a grayscale image (#compare could also be used for
     * this purpose) or for removing noise, that is, filtering out pixels with too-small or too-large
     * values. There are several types of thresholding supported by the function; they are determined by
     * the type parameter.
     *
     * Also, the special values #THRESH_OTSU or #THRESH_TRIANGLE may be combined with one of the
     * above values. In these cases, the function determines the optimal threshold value using Otsu's
     * or the Triangle algorithm and uses it instead of the specified thresh.
     *
     * <b>Note:</b> Currently, Otsu's and the Triangle methods are implemented only for 8-bit single-channel images.
     *
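     * A minimal sketch of Otsu binarization (here {@code gray} is an assumed 8-bit
     * single-channel Mat; with #THRESH_OTSU the {@code thresh} argument is ignored):
     * <code>
     *     Mat bin = new Mat();
     *     double t = Imgproc.threshold(gray, bin, 0, 255,
     *             Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
     * </code>
     *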
     * @param src input array (multiple-channel, 8-bit or 32-bit floating point).
     * @param dst output array of the same size and type and the same number of channels as src.
     * @param thresh threshold value.
     * @param maxval maximum value to use with the #THRESH_BINARY and #THRESH_BINARY_INV thresholding
     * types.
     * @param type thresholding type (see #ThresholdTypes).
     * @return the computed threshold value if Otsu's or the Triangle method is used.
     *
     * SEE:  adaptiveThreshold, findContours, compare, min, max
     */
    public static double threshold(Mat src, Mat dst, double thresh, double maxval, int type) {
        return threshold_0(src.nativeObj, dst.nativeObj, thresh, maxval, type);
    }


    //
    // C++:  void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
    //

    /**
     * Applies an adaptive threshold to an array.
     *
     * The function transforms a grayscale image to a binary image according to the formulae:
     * <ul>
     *   <li>
     *    <b>THRESH_BINARY</b>
     *     \(dst(x,y) = \begin{cases} \texttt{maxValue} &amp; \text{if } src(x,y) &gt; T(x,y) \\ 0 &amp; \text{otherwise} \end{cases}\)
     *   </li>
     *   <li>
     *    <b>THRESH_BINARY_INV</b>
     *     \(dst(x,y) = \begin{cases} 0 &amp; \text{if } src(x,y) &gt; T(x,y) \\ \texttt{maxValue} &amp; \text{otherwise} \end{cases}\)
     * where \(T(x,y)\) is a threshold calculated individually for each pixel (see adaptiveMethod parameter).
     *   </li>
     * </ul>
     *
     * The function can process the image in-place.
     *
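     * A minimal sketch of mean-based adaptive binarization (here {@code gray} is an assumed
     * 8-bit single-channel Mat; the block size and C values are illustrative):
     * <code>
     *     Mat bin = new Mat();
     *     Imgproc.adaptiveThreshold(gray, bin, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C,
     *             Imgproc.THRESH_BINARY, 11, 2);
     * </code>
     *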
     * @param src Source 8-bit single-channel image.
     * @param dst Destination image of the same size and the same type as src.
     * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied.
     * @param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes.
     * #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries.
     * @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV,
     * see #ThresholdTypes.
     * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the
     * pixel: 3, 5, 7, and so on.
     * @param C Constant subtracted from the mean or weighted mean (see #AdaptiveThresholdTypes). Normally, it
     * is positive but may be zero or negative as well.
     *
     * SEE:  threshold, blur, GaussianBlur
     */
    public static void adaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) {
        adaptiveThreshold_0(src.nativeObj, dst.nativeObj, maxValue, adaptiveMethod, thresholdType, blockSize, C);
    }


    //
    // C++:  void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
    //

    /**
     * Blurs an image and downsamples it.
     *
     * By default, the size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in
     * any case, the following conditions should be satisfied:
     *
     * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\)
     *
     * The function performs the downsampling step of the Gaussian pyramid construction. First, it
     * convolves the source image with the kernel:
     *
     * \(\frac{1}{256} \begin{bmatrix} 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 6 &amp; 24 &amp; 36 &amp; 24 &amp; 6  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1 \end{bmatrix}\)
     *
     * Then, it downsamples the image by rejecting even rows and columns.
     *
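     * A minimal sketch of building one pyramid level:
     * <code>
     *     Mat half = new Mat();
     *     Imgproc.pyrDown(src, half); // about (src.cols+1)/2 x (src.rows+1)/2
     * </code>
     *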
     * @param src input image.
     * @param dst output image; it has the specified size and the same type as src.
     * @param dstsize size of the output image.
     * @param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported)
     */
    public static void pyrDown(Mat src, Mat dst, Size dstsize, int borderType) {
        pyrDown_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType);
    }

    /**
     * Blurs an image and downsamples it.
     *
     * By default, the size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in
     * any case, the following conditions should be satisfied:
     *
     * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\)
     *
     * The function performs the downsampling step of the Gaussian pyramid construction. First, it
     * convolves the source image with the kernel:
     *
     * \(\frac{1}{256} \begin{bmatrix} 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 6 &amp; 24 &amp; 36 &amp; 24 &amp; 6  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1 \end{bmatrix}\)
     *
     * Then, it downsamples the image by rejecting even rows and columns.
     *
     * @param src input image.
     * @param dst output image; it has the specified size and the same type as src.
     * @param dstsize size of the output image.
     */
    public static void pyrDown(Mat src, Mat dst, Size dstsize) {
        pyrDown_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height);
    }

    /**
     * Blurs an image and downsamples it.
     *
     * By default, the size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in
     * any case, the following conditions should be satisfied:
     *
     * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\)
     *
     * The function performs the downsampling step of the Gaussian pyramid construction. First, it
     * convolves the source image with the kernel:
     *
     * \(\frac{1}{256} \begin{bmatrix} 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 6 &amp; 24 &amp; 36 &amp; 24 &amp; 6  \\ 4 &amp; 16 &amp; 24 &amp; 16 &amp; 4  \\ 1 &amp; 4 &amp; 6 &amp; 4 &amp; 1 \end{bmatrix}\)
     *
     * Then, it downsamples the image by rejecting even rows and columns.
     *
     * @param src input image.
     * @param dst output image; it has the specified size and the same type as src.
     */
    public static void pyrDown(Mat src, Mat dst) {
        pyrDown_2(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
    //

    /**
     * Upsamples an image and then blurs it.
     *
     * By default, the size of the output image is computed as {@code Size(src.cols*2, src.rows*2)}, but in any
     * case, the following conditions should be satisfied:
     *
     * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq  ( \texttt{dstsize.width}   \mod  2)  \\ | \texttt{dstsize.height} -src.rows*2| \leq  ( \texttt{dstsize.height}   \mod  2) \end{array}\)
     *
     * The function performs the upsampling step of the Gaussian pyramid construction, though it can
     * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
     * injecting even zero rows and columns and then convolves the result with the same kernel as in
     * pyrDown multiplied by 4.
     *
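     * A minimal sketch of one Laplacian pyramid level (Core.subtract and the reuse of
     * {@code src.size()} are illustrative; the sizes satisfy the conditions above):
     * <code>
     *     Mat down = new Mat(), up = new Mat(), lap = new Mat();
     *     Imgproc.pyrDown(src, down);
     *     Imgproc.pyrUp(down, up, src.size());
     *     Core.subtract(src, up, lap); // band-pass residual
     * </code>
     *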
     * @param src input image.
     * @param dst output image. It has the specified size and the same type as src.
     * @param dstsize size of the output image.
     * @param borderType Pixel extrapolation method, see #BorderTypes (only #BORDER_DEFAULT is supported)
     */
    public static void pyrUp(Mat src, Mat dst, Size dstsize, int borderType) {
        pyrUp_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType);
    }

    /**
     * Upsamples an image and then blurs it.
     *
     * By default, the size of the output image is computed as {@code Size(src.cols*2, src.rows*2)}, but in any
     * case, the following conditions should be satisfied:
     *
     * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq  ( \texttt{dstsize.width}   \mod  2)  \\ | \texttt{dstsize.height} -src.rows*2| \leq  ( \texttt{dstsize.height}   \mod  2) \end{array}\)
     *
     * The function performs the upsampling step of the Gaussian pyramid construction, though it can
     * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
     * injecting even zero rows and columns and then convolves the result with the same kernel as in
     * pyrDown multiplied by 4.
     *
     * @param src input image.
     * @param dst output image. It has the specified size and the same type as src.
     * @param dstsize size of the output image.
     */
    public static void pyrUp(Mat src, Mat dst, Size dstsize) {
        pyrUp_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height);
    }

    /**
     * Upsamples an image and then blurs it.
     *
     * By default, the size of the output image is computed as {@code Size(src.cols*2, src.rows*2)}, but in any
     * case, the following conditions should be satisfied:
     *
     * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq  ( \texttt{dstsize.width}   \mod  2)  \\ | \texttt{dstsize.height} -src.rows*2| \leq  ( \texttt{dstsize.height}   \mod  2) \end{array}\)
     *
     * The function performs the upsampling step of the Gaussian pyramid construction, though it can
     * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by
     * injecting even zero rows and columns and then convolves the result with the same kernel as in
     * pyrDown multiplied by 4.
     *
     * @param src input image.
     * @param dst output image. It has the specified size and the same type as src.
     */
    public static void pyrUp(Mat src, Mat dst) {
        pyrUp_2(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false)
    //

    /**
     * Calculates a histogram of a set of arrays.
     *
     * This variant supports only uniform histograms.
     *
     * The ranges argument is either an empty vector or a flattened vector of histSize.size()*2 elements
     * (histSize.size() element pairs). The first and second elements of each pair specify the lower and
     * upper boundaries.
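     * A minimal sketch of a 256-bin grayscale histogram (here {@code gray} is an assumed 8-bit
     * single-channel Mat):
     * <code>
     *     Mat hist = new Mat();
     *     Imgproc.calcHist(java.util.Arrays.asList(gray), new MatOfInt(0), new Mat(),
     *             hist, new MatOfInt(256), new MatOfFloat(0f, 256f));
     * </code>
     *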
     * @param images automatically generated
     * @param channels automatically generated
     * @param mask automatically generated
     * @param hist automatically generated
     * @param histSize automatically generated
     * @param ranges automatically generated
     * @param accumulate automatically generated
     */
    public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges, boolean accumulate) {
        Mat images_mat = Converters.vector_Mat_to_Mat(images);
        Mat channels_mat = channels;
        Mat histSize_mat = histSize;
        Mat ranges_mat = ranges;
        calcHist_0(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj, accumulate);
    }

    /**
     * Calculates a histogram of a set of arrays.
     *
     * This variant supports only uniform histograms.
     *
     * The ranges argument is either an empty vector or a flattened vector of histSize.size()*2 elements
     * (histSize.size() element pairs). The first and second elements of each pair specify the lower and
     * upper boundaries.
     * @param images automatically generated
     * @param channels automatically generated
     * @param mask automatically generated
     * @param hist automatically generated
     * @param histSize automatically generated
     * @param ranges automatically generated
     */
    public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges) {
        Mat images_mat = Converters.vector_Mat_to_Mat(images);
        Mat channels_mat = channels;
        Mat histSize_mat = histSize;
        Mat ranges_mat = ranges;
        calcHist_1(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj);
    }


    //
    // C++:  void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
    //

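    /**
     * Calculates the back projection of a histogram.
     *
     * Each destination pixel is set to the value of the {@code hist} bin corresponding to the
     * source pixel tuple selected by {@code channels}, scaled by {@code scale}; see the C++
     * documentation of cv::calcBackProject for the full description.
     */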
    public static void calcBackProject(List<Mat> images, MatOfInt channels, Mat hist, Mat dst, MatOfFloat ranges, double scale) {
        Mat images_mat = Converters.vector_Mat_to_Mat(images);
        Mat channels_mat = channels;
        Mat ranges_mat = ranges;
        calcBackProject_0(images_mat.nativeObj, channels_mat.nativeObj, hist.nativeObj, dst.nativeObj, ranges_mat.nativeObj, scale);
    }


    //
    // C++:  double cv::compareHist(Mat H1, Mat H2, int method)
    //

    /**
     * Compares two histograms.
     *
     * The function cv::compareHist compares two dense or two sparse histograms using the specified method.
     *
     * The function returns \(d(H_1, H_2)\).
     *
     * While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable
     * for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling
     * problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms
     * or more general sparse configurations of weighted points, consider using the #EMD function.
     *
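     * A minimal sketch of comparing two histograms (here {@code h1} and {@code h2} are assumed
     * to be histograms computed by #calcHist; the correlation method is an illustrative choice):
     * <code>
     *     double d = Imgproc.compareHist(h1, h2, Imgproc.HISTCMP_CORREL);
     * </code>
     *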
     * @param H1 First compared histogram.
     * @param H2 Second compared histogram of the same size as H1.
     * @param method Comparison method, see #HistCompMethods.
     * @return automatically generated
     */
    public static double compareHist(Mat H1, Mat H2, int method) {
        return compareHist_0(H1.nativeObj, H2.nativeObj, method);
    }


    //
    // C++:  void cv::equalizeHist(Mat src, Mat& dst)
    //

    /**
     * Equalizes the histogram of a grayscale image.
     *
     * The function equalizes the histogram of the input image using the following algorithm:
     *
     * <ul>
     *   <li>
     *  Calculate the histogram \(H\) for src.
     *   </li>
     *   <li>
     *  Normalize the histogram so that the sum of histogram bins is 255.
     *   </li>
     *   <li>
     *  Compute the integral of the histogram:
     * \(H'_i =  \sum _{0  \le j &lt; i} H(j)\)
     *   </li>
     *   <li>
     *  Transform the image using \(H'\) as a look-up table: \(\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\)
     *   </li>
     * </ul>
     *
     * The algorithm normalizes the brightness and increases the contrast of the image.
     *
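     * A minimal usage sketch (here {@code gray} is an assumed 8-bit single-channel Mat):
     * <code>
     *     Mat eq = new Mat();
     *     Imgproc.equalizeHist(gray, eq);
     * </code>
     *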
     * @param src Source 8-bit single channel image.
     * @param dst Destination image of the same size and type as src.
     */
    public static void equalizeHist(Mat src, Mat dst) {
        equalizeHist_0(src.nativeObj, dst.nativeObj);
    }


    //
    // C++:  Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8))
    //

    /**
     * Creates a smart pointer to a cv::CLAHE class and initializes it.
     *
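     * A minimal usage sketch (the clip limit and tile grid values are illustrative):
     * <code>
     *     CLAHE clahe = Imgproc.createCLAHE(2.0, new Size(8, 8));
     *     Mat out = new Mat();
     *     clahe.apply(gray, out); // gray: 8-bit single-channel
     * </code>
     *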
     * @param clipLimit Threshold for contrast limiting.
     * @param tileGridSize Size of grid for histogram equalization. Input image will be divided into
     * equally sized rectangular tiles. tileGridSize defines the number of tiles in each row and column.
     * @return automatically generated
     */
    public static CLAHE createCLAHE(double clipLimit, Size tileGridSize) {
        return CLAHE.__fromPtr__(createCLAHE_0(clipLimit, tileGridSize.width, tileGridSize.height));
    }

    /**
     * Creates a smart pointer to a cv::CLAHE class and initializes it.
     *
     * @param clipLimit Threshold for contrast limiting.
     * @return automatically generated
     */
    public static CLAHE createCLAHE(double clipLimit) {
        return CLAHE.__fromPtr__(createCLAHE_1(clipLimit));
    }

    /**
     * Creates a smart pointer to a cv::CLAHE class and initializes it.
     *
     * @return automatically generated
     */
    public static CLAHE createCLAHE() {
        return CLAHE.__fromPtr__(createCLAHE_2());
    }


    //
    // C++:  float cv::wrapperEMD(Mat signature1, Mat signature2, int distType, Mat cost = Mat(), Ptr_float& lowerBound = Ptr<float>(), Mat& flow = Mat())
    //

    /**
     * Computes the "minimal work" distance between two weighted point configurations.
     *
     * The function computes the earth mover distance and/or a lower boundary of the distance between the
     * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
     * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
     * problem that is solved using a modification of the simplex algorithm, so the complexity is
     * exponential in the worst case, though on average it is much faster. In the case of a real metric,
     * the lower boundary can be calculated even faster (using a linear-time algorithm), and it can be used
     * to quickly determine whether the two signatures are far enough apart that they cannot relate to the
     * same object.
     *
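     * A minimal sketch comparing two 1D signatures (the weights and coordinates are
     * illustrative; each row is {weight, coordinate}):
     * <code>
     *     Mat sig1 = new Mat(2, 2, CvType.CV_32F);
     *     sig1.put(0, 0, 1.0, 0.0); // weight 1 at coordinate 0
     *     sig1.put(1, 0, 1.0, 1.0); // weight 1 at coordinate 1
     *     Mat sig2 = new Mat(2, 2, CvType.CV_32F);
     *     sig2.put(0, 0, 1.0, 1.0);
     *     sig2.put(1, 0, 1.0, 2.0);
     *     float d = Imgproc.EMD(sig1, sig2, Imgproc.DIST_L2);
     * </code>
     *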
     * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix.
     * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
     * a single column (weights only) if the user-defined cost matrix is used. The weights must be
     * non-negative and have at least one non-zero value.
     * @param signature2 Second signature of the same format as signature1, though the number of rows
     * may be different. The total weights may be different. In this case an extra "dummy" point is added
     * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
     * value.
     * @param distType Used metric. See #DistanceTypes.
     * @param cost User-defined \(\texttt{size1}\times \texttt{size2}\) cost matrix. Also, if a cost matrix
     * is used, a lower boundary of the distance cannot be calculated because it needs a metric function.
     * @param flow Resultant \(\texttt{size1} \times \texttt{size2}\) flow matrix: \(\texttt{flow}_{i,j}\) is
     * a flow from the \(i\)-th point of signature1 to the \(j\)-th point of signature2.
     * @return automatically generated
     */
    public static float EMD(Mat signature1, Mat signature2, int distType, Mat cost, Mat flow) {
        return EMD_0(signature1.nativeObj, signature2.nativeObj, distType, cost.nativeObj, flow.nativeObj);
    }

    /**
     * Computes the "minimal work" distance between two weighted point configurations.
     *
     * The function computes the earth mover distance and/or a lower boundary of the distance between the
     * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
     * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
     * problem that is solved using a modification of the simplex algorithm, so the complexity is
     * exponential in the worst case, though on average it is much faster. In the case of a real metric,
     * the lower boundary can be calculated even faster (using a linear-time algorithm), and it can be used
     * to quickly determine whether the two signatures are far enough apart that they cannot relate to the
     * same object.
     *
     * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix.
     * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
     * a single column (weights only) if the user-defined cost matrix is used. The weights must be
     * non-negative and have at least one non-zero value.
     * @param signature2 Second signature of the same format as signature1, though the number of rows
     * may be different. The total weights may be different. In this case an extra "dummy" point is added
     * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
     * value.
     * @param distType Used metric. See #DistanceTypes.
     * @param cost User-defined \(\texttt{size1}\times \texttt{size2}\) cost matrix. Also, if a cost matrix
     * is used, a lower boundary of the distance cannot be calculated because it needs a metric function.
     * @return automatically generated
     */
    public static float EMD(Mat signature1, Mat signature2, int distType, Mat cost) {
        return EMD_1(signature1.nativeObj, signature2.nativeObj, distType, cost.nativeObj);
    }

    /**
     * Computes the "minimal work" distance between two weighted point configurations.
     *
     * The function computes the earth mover distance and/or a lower boundary of the distance between the
     * two weighted point configurations. One of the applications described in CITE: RubnerSept98,
     * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
     * problem that is solved using a modification of the simplex algorithm, so the complexity is
     * exponential in the worst case, though on average it is much faster. In the case of a real metric,
     * the lower boundary can be calculated even faster (using a linear-time algorithm), and it can be used
     * to quickly determine whether the two signatures are far enough apart that they cannot relate to the
     * same object.
     *
     * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix.
     * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
     * a single column (weights only) if the user-defined cost matrix is used. The weights must be
     * non-negative and have at least one non-zero value.
     * @param signature2 Second signature of the same format as signature1, though the number of rows
     * may be different. The total weights may be different. In this case an extra "dummy" point is added
     * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
     * value.
     * @param distType Used metric. See #DistanceTypes.
6092     * @return automatically generated
6093     */
6094    public static float EMD(Mat signature1, Mat signature2, int distType) {
6095        return EMD_3(signature1.nativeObj, signature2.nativeObj, distType);
6096    }
6097
6098
6099    //
6100    // C++:  void cv::watershed(Mat image, Mat& markers)
6101    //
6102
6103    /**
6104     * Performs a marker-based image segmentation using the watershed algorithm.
6105     *
6106     * The function implements one of the variants of watershed, non-parametric marker-based segmentation
6107     * algorithm, described in CITE: Meyer92 .
6108     *
6109     * Before passing the image to the function, you have to roughly outline the desired regions in the
6110     * image markers with positive (&gt;0) indices. So, every region is represented as one or more connected
6111     * components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary
6112     * mask using #findContours and #drawContours (see the watershed.cpp demo). The markers are "seeds" of
6113     * the future image regions. All the other pixels in markers , whose relation to the outlined regions
6114     * is not known and should be defined by the algorithm, should be set to 0's. In the function output,
6115     * each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the
6116     * regions.
6117     *
     * <b>Note:</b> Any two neighboring connected components are not necessarily separated by a watershed boundary
6119     * (-1's pixels); for example, they can touch each other in the initial marker image passed to the
6120     * function.
6121     *
6122     * @param image Input 8-bit 3-channel image.
6123     * @param markers Input/output 32-bit single-channel image (map) of markers. It should have the same
6124     * size as image .
6125     *
6126     * SEE: findContours
6127     */
6128    public static void watershed(Mat image, Mat markers) {
6129        watershed_0(image.nativeObj, markers.nativeObj);
6130    }
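
    // Usage sketch (illustrative; assumes the OpenCV native library is loaded and
    // org.opencv.core.CvType / org.opencv.imgcodecs.Imgcodecs are imported;
    // "input.png" and the seed coordinates are hypothetical):
    //
    //   Mat image = Imgcodecs.imread("input.png");  // 8-bit, 3-channel
    //   Mat markers = new Mat(image.size(), CvType.CV_32SC1, Scalar.all(0));
    //   Imgproc.circle(markers, new Point(40, 40), 5, Scalar.all(1), -1);    // seed for region 1
    //   Imgproc.circle(markers, new Point(200, 160), 5, Scalar.all(2), -1);  // seed for region 2
    //   Imgproc.watershed(image, markers);
    //   // markers now holds the region label per pixel, with -1 on boundaries.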
6131
6132
6133    //
6134    // C++:  void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
6135    //
6136
6137    /**
6138     * Performs initial step of meanshift segmentation of an image.
6139     *
6140     * The function implements the filtering stage of meanshift segmentation, that is, the output of the
6141     * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
6142     * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
6143     * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
6144     * considered:
6145     *
6146     * \((x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\)
6147     *
6148     * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
6149     * (though, the algorithm does not depend on the color space used, so any 3-component color space can
6150     * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
6151     * (R',G',B') are found and they act as the neighborhood center on the next iteration:
6152     *
6153     * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\)
6154     *
     * After the iterations are over, the color components of the initial pixel (that is, the pixel from where
     * the iterations started) are set to the final value (average color at the last iteration):
     *
     * \(I(X,Y) &lt;- (R*,G*,B*)\)
     *
     * When maxLevel &gt; 0, the Gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
6161     * run on the smallest layer first. After that, the results are propagated to the larger layer and the
6162     * iterations are run again only on those pixels where the layer colors differ by more than sr from the
6163     * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
6164     * results will be actually different from the ones obtained by running the meanshift procedure on the
6165     * whole original image (i.e. when maxLevel==0).
6166     *
6167     * @param src The source 8-bit, 3-channel image.
6168     * @param dst The destination image of the same format and the same size as the source.
6169     * @param sp The spatial window radius.
6170     * @param sr The color window radius.
6171     * @param maxLevel Maximum level of the pyramid for the segmentation.
6172     * @param termcrit Termination criteria: when to stop meanshift iterations.
6173     */
6174    public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel, TermCriteria termcrit) {
6175        pyrMeanShiftFiltering_0(src.nativeObj, dst.nativeObj, sp, sr, maxLevel, termcrit.type, termcrit.maxCount, termcrit.epsilon);
6176    }
6177
6178    /**
6179     * Performs initial step of meanshift segmentation of an image.
6180     *
6181     * The function implements the filtering stage of meanshift segmentation, that is, the output of the
6182     * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
6183     * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
6184     * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
6185     * considered:
6186     *
6187     * \((x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\)
6188     *
6189     * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
6190     * (though, the algorithm does not depend on the color space used, so any 3-component color space can
6191     * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
6192     * (R',G',B') are found and they act as the neighborhood center on the next iteration:
6193     *
6194     * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\)
6195     *
     * After the iterations are over, the color components of the initial pixel (that is, the pixel from where
     * the iterations started) are set to the final value (average color at the last iteration):
     *
     * \(I(X,Y) &lt;- (R*,G*,B*)\)
     *
     * When maxLevel &gt; 0, the Gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
6202     * run on the smallest layer first. After that, the results are propagated to the larger layer and the
6203     * iterations are run again only on those pixels where the layer colors differ by more than sr from the
6204     * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
6205     * results will be actually different from the ones obtained by running the meanshift procedure on the
6206     * whole original image (i.e. when maxLevel==0).
6207     *
6208     * @param src The source 8-bit, 3-channel image.
6209     * @param dst The destination image of the same format and the same size as the source.
6210     * @param sp The spatial window radius.
6211     * @param sr The color window radius.
6212     * @param maxLevel Maximum level of the pyramid for the segmentation.
6213     */
6214    public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel) {
6215        pyrMeanShiftFiltering_1(src.nativeObj, dst.nativeObj, sp, sr, maxLevel);
6216    }
6217
6218    /**
6219     * Performs initial step of meanshift segmentation of an image.
6220     *
6221     * The function implements the filtering stage of meanshift segmentation, that is, the output of the
6222     * function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
6223     * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
6224     * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
6225     * considered:
6226     *
6227     * \((x,y): X- \texttt{sp} \le x  \le X+ \texttt{sp} , Y- \texttt{sp} \le y  \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)||   \le \texttt{sr}\)
6228     *
6229     * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
6230     * (though, the algorithm does not depend on the color space used, so any 3-component color space can
6231     * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector
6232     * (R',G',B') are found and they act as the neighborhood center on the next iteration:
6233     *
6234     * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\)
6235     *
     * After the iterations are over, the color components of the initial pixel (that is, the pixel from where
     * the iterations started) are set to the final value (average color at the last iteration):
     *
     * \(I(X,Y) &lt;- (R*,G*,B*)\)
     *
     * When maxLevel &gt; 0, the Gaussian pyramid of maxLevel+1 levels is built, and the above procedure is
6242     * run on the smallest layer first. After that, the results are propagated to the larger layer and the
6243     * iterations are run again only on those pixels where the layer colors differ by more than sr from the
6244     * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the
6245     * results will be actually different from the ones obtained by running the meanshift procedure on the
6246     * whole original image (i.e. when maxLevel==0).
6247     *
6248     * @param src The source 8-bit, 3-channel image.
6249     * @param dst The destination image of the same format and the same size as the source.
6250     * @param sp The spatial window radius.
6251     * @param sr The color window radius.
6252     */
6253    public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr) {
6254        pyrMeanShiftFiltering_2(src.nativeObj, dst.nativeObj, sp, sr);
6255    }
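
    // Usage sketch (illustrative; "src" is a hypothetical 8-bit, 3-channel input):
    //
    //   Mat dst = new Mat();
    //   // Spatial radius 21, color radius 51; defaults used for maxLevel and termcrit.
    //   Imgproc.pyrMeanShiftFiltering(src, dst, 21, 51);
    //   // dst is the "posterized" image with flattened color gradients.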
6256
6257
6258    //
6259    // C++:  void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
6260    //
6261
6262    /**
6263     * Runs the GrabCut algorithm.
6264     *
6265     * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).
6266     *
6267     * @param img Input 8-bit 3-channel image.
6268     * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
6269     * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.
6270     * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
6271     * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT .
6272     * @param bgdModel Temporary array for the background model. Do not modify it while you are
6273     * processing the same image.
     * @param fgdModel Temporary array for the foreground model. Do not modify it while you are
     * processing the same image.
     * @param iterCount Number of iterations the algorithm should make before returning the result. Note
     * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or
     * mode==#GC_EVAL.
6279     * @param mode Operation mode that could be one of the #GrabCutModes
6280     */
6281    public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode) {
6282        grabCut_0(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount, mode);
6283    }
6284
6285    /**
6286     * Runs the GrabCut algorithm.
6287     *
6288     * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).
6289     *
6290     * @param img Input 8-bit 3-channel image.
6291     * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when
6292     * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.
6293     * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as
6294     * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT .
6295     * @param bgdModel Temporary array for the background model. Do not modify it while you are
6296     * processing the same image.
     * @param fgdModel Temporary array for the foreground model. Do not modify it while you are
     * processing the same image.
     * @param iterCount Number of iterations the algorithm should make before returning the result. Note
     * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or
     * mode==#GC_EVAL.
6302     */
6303    public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount) {
6304        grabCut_1(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount);
6305    }
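
    // Usage sketch (illustrative; "img" is a hypothetical 8-bit, 3-channel input and
    // the ROI coordinates are made up):
    //
    //   Mat mask = new Mat();                   // filled in by GC_INIT_WITH_RECT
    //   Mat bgdModel = new Mat(), fgdModel = new Mat();
    //   Rect roi = new Rect(50, 50, 200, 150);  // rough box around the object
    //   Imgproc.grabCut(img, mask, roi, bgdModel, fgdModel, 5, Imgproc.GC_INIT_WITH_RECT);
    //   // Pixels whose mask value is GC_FGD or GC_PR_FGD belong to the foreground estimate.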
6306
6307
6308    //
6309    // C++:  void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP)
6310    //
6311
6312    /**
6313     * Calculates the distance to the closest zero pixel for each pixel of the source image.
6314     *
6315     * The function cv::distanceTransform calculates the approximate or precise distance from every binary
6316     * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.
6317     *
6318     * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the
6319     * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library.
6320     *
6321     * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function
6322     * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
     * diagonal, or knight's move (the latter is available only for a \(5\times 5\) mask). The overall
     * distance is calculated as a sum of these basic distances. Since the distance function should be
     * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as {@code a}), all
6326     * the diagonal shifts must have the same cost (denoted as {@code b}), and all knight's moves must have the
6327     * same cost (denoted as {@code c}). For the #DIST_C and #DIST_L1 types, the distance is calculated
6328     * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a
6329     * relative error (a \(5\times 5\) mask gives more accurate results). For {@code a},{@code b}, and {@code c}, OpenCV
6330     * uses the values suggested in the original paper:
6331     * <ul>
6332     *   <li>
6333     *  DIST_L1: {@code a = 1, b = 2}
6334     *   </li>
6335     *   <li>
6336     *  DIST_L2:
6337     *   <ul>
6338     *     <li>
6339     *      {@code 3 x 3}: {@code a=0.955, b=1.3693}
6340     *     </li>
6341     *     <li>
6342     *      {@code 5 x 5}: {@code a=1, b=1.4, c=2.1969}
6343     *     </li>
     *   </ul>
     *   </li>
     *   <li>
6346     *  DIST_C: {@code a = 1, b = 1}
6347     *   </li>
6348     * </ul>
6349     *
     * Typically, for a fast, coarse #DIST_L2 distance estimation, a \(3\times 3\) mask is used. For a
     * more accurate #DIST_L2 estimation, a \(5\times 5\) mask or the precise algorithm is used.
     * Note that both the precise and the approximate algorithms are linear in the number of pixels.
6353     *
6354     * This variant of the function does not only compute the minimum distance for each pixel \((x, y)\)
6355     * but also identifies the nearest connected component consisting of zero pixels
6356     * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
6357     * component/pixel is stored in {@code labels(x, y)}. When labelType==#DIST_LABEL_CCOMP, the function
6358     * automatically finds connected components of zero pixels in the input image and marks them with
6359     * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
6360     * marks all the zero pixels with distinct labels.
6361     *
6362     * In this mode, the complexity is still linear. That is, the function provides a very fast way to
6363     * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
6364     * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported
6365     * yet.
6366     *
6367     * @param src 8-bit, single-channel (binary) source image.
     * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
6369     * single-channel image of the same size as src.
6370     * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
6371     * CV_32SC1 and the same size as src.
6372     * @param distanceType Type of distance, see #DistanceTypes
6373     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.
6374     * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,
6375     * the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times
6376     * 5\) or any larger aperture.
6377     * @param labelType Type of the label array to build, see #DistanceTransformLabelTypes.
6378     */
6379    public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) {
6380        distanceTransformWithLabels_0(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize, labelType);
6381    }
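
    // Usage sketch (illustrative; "bin" is a hypothetical 8-bit single-channel image
    // whose zero pixels are the "obstacles" to measure distance from):
    //
    //   Mat dist = new Mat(), labels = new Mat();
    //   Imgproc.distanceTransformWithLabels(bin, dist, labels, Imgproc.DIST_L2, 3,
    //           Imgproc.DIST_LABEL_CCOMP);
    //   // dist: CV_32FC1 distances; labels: CV_32SC1 discrete Voronoi diagram.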
6382
6383    /**
6384     * Calculates the distance to the closest zero pixel for each pixel of the source image.
6385     *
6386     * The function cv::distanceTransform calculates the approximate or precise distance from every binary
6387     * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.
6388     *
6389     * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the
6390     * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library.
6391     *
6392     * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function
6393     * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,
     * diagonal, or knight's move (the latter is available only for a \(5\times 5\) mask). The overall
     * distance is calculated as a sum of these basic distances. Since the distance function should be
     * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as {@code a}), all
6397     * the diagonal shifts must have the same cost (denoted as {@code b}), and all knight's moves must have the
6398     * same cost (denoted as {@code c}). For the #DIST_C and #DIST_L1 types, the distance is calculated
6399     * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a
6400     * relative error (a \(5\times 5\) mask gives more accurate results). For {@code a},{@code b}, and {@code c}, OpenCV
6401     * uses the values suggested in the original paper:
6402     * <ul>
6403     *   <li>
6404     *  DIST_L1: {@code a = 1, b = 2}
6405     *   </li>
6406     *   <li>
6407     *  DIST_L2:
6408     *   <ul>
6409     *     <li>
6410     *      {@code 3 x 3}: {@code a=0.955, b=1.3693}
6411     *     </li>
6412     *     <li>
6413     *      {@code 5 x 5}: {@code a=1, b=1.4, c=2.1969}
6414     *     </li>
     *   </ul>
     *   </li>
     *   <li>
6417     *  DIST_C: {@code a = 1, b = 1}
6418     *   </li>
6419     * </ul>
6420     *
     * Typically, for a fast, coarse #DIST_L2 distance estimation, a \(3\times 3\) mask is used. For a
     * more accurate #DIST_L2 estimation, a \(5\times 5\) mask or the precise algorithm is used.
     * Note that both the precise and the approximate algorithms are linear in the number of pixels.
6424     *
6425     * This variant of the function does not only compute the minimum distance for each pixel \((x, y)\)
6426     * but also identifies the nearest connected component consisting of zero pixels
6427     * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the
6428     * component/pixel is stored in {@code labels(x, y)}. When labelType==#DIST_LABEL_CCOMP, the function
6429     * automatically finds connected components of zero pixels in the input image and marks them with
6430     * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and
6431     * marks all the zero pixels with distinct labels.
6432     *
6433     * In this mode, the complexity is still linear. That is, the function provides a very fast way to
6434     * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the
6435     * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported
6436     * yet.
6437     *
6438     * @param src 8-bit, single-channel (binary) source image.
     * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
6440     * single-channel image of the same size as src.
6441     * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type
6442     * CV_32SC1 and the same size as src.
6443     * @param distanceType Type of distance, see #DistanceTypes
6444     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.
6445     * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,
6446     * the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times
6447     * 5\) or any larger aperture.
6448     */
6449    public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize) {
6450        distanceTransformWithLabels_1(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize);
6451    }
6452
6453
6454    //
6455    // C++:  void cv::distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize, int dstType = CV_32F)
6456    //
6457
6458    /**
     * Calculates the distance to the closest zero pixel for each pixel of the source image.
     *
6460     * @param src 8-bit, single-channel (binary) source image.
     * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
6462     * single-channel image of the same size as src .
6463     * @param distanceType Type of distance, see #DistanceTypes
6464     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the
6465     * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives
6466     * the same result as \(5\times 5\) or any larger aperture.
6467     * @param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for
6468     * the first variant of the function and distanceType == #DIST_L1.
6469     */
6470    public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize, int dstType) {
6471        distanceTransform_0(src.nativeObj, dst.nativeObj, distanceType, maskSize, dstType);
6472    }
6473
6474    /**
     * Calculates the distance to the closest zero pixel for each pixel of the source image.
     *
6476     * @param src 8-bit, single-channel (binary) source image.
     * @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,
6478     * single-channel image of the same size as src .
6479     * @param distanceType Type of distance, see #DistanceTypes
6480     * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the
6481     * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives
6482     * the same result as \(5\times 5\) or any larger aperture.
     * (In this variant the output type defaults to CV_32F.)
6484     */
6485    public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize) {
6486        distanceTransform_1(src.nativeObj, dst.nativeObj, distanceType, maskSize);
6487    }
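
    // Usage sketch (illustrative; "bin" as above, e.g. the output of a threshold):
    //
    //   Mat dist = new Mat();
    //   Imgproc.distanceTransform(bin, dist, Imgproc.DIST_L2, Imgproc.DIST_MASK_PRECISE);
    //   // Normalize to [0, 1] for display (requires org.opencv.core.Core):
    //   Core.normalize(dist, dist, 0.0, 1.0, Core.NORM_MINMAX);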
6488
6489
6490    //
6491    // C++:  int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
6492    //
6493
6494    /**
6495     * Fills a connected component with the given color.
6496     *
6497     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6498     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6499     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6500     *
6501     * <ul>
6502     *   <li>
6503     *  in case of a grayscale image and floating range
6504     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6505     *   </li>
6506     * </ul>
6507     *
6508     *
6509     * <ul>
6510     *   <li>
6511     *  in case of a grayscale image and fixed range
6512     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6513     *   </li>
6514     * </ul>
6515     *
6516     *
6517     * <ul>
6518     *   <li>
6519     *  in case of a color image and floating range
6520     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6521     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6522     * and
6523     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6524     *   </li>
6525     * </ul>
6526     *
6527     *
6528     * <ul>
6529     *   <li>
6530     *  in case of a color image and fixed range
6531     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6532     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6533     * and
6534     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6535     *   </li>
6536     * </ul>
6537     *
6538     *
6539     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6540     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6541     * be close enough to:
6542     * <ul>
6543     *   <li>
6544     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6545     * of a floating range.
6546     *   </li>
6547     *   <li>
6548     *  Color/brightness of the seed point in case of a fixed range.
6549     *   </li>
6550     * </ul>
6551     *
6552     * Use these functions to either mark a connected component with the specified color in-place, or build
6553     * a mask and then extract the contour, or copy the region to another image, and so on.
6554     *
6555     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6556     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6557     * the details below.
6558     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6559     * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
     * input and output parameter, you must take responsibility for initializing it.
6561     * Flood-filling cannot go across non-zero pixels in the input mask. For example,
6562     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6563     * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
6564     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6565     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6566     * to make sure the filled areas do not overlap.
6567     * @param seedPoint Starting point.
6568     * @param newVal New value of the repainted domain pixels.
6569     * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
6570     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6571     * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and
6572     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6573     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6574     * repainted domain.
6575     * @param flags Operation flags. The first 8 bits contain a connectivity value. The default value of
6576     * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A
6577     * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)
6578     * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill
6579     * the mask (the default value is 1). For example, 4 | ( 255 &lt;&lt; 8 ) will consider 4 nearest
6580     * neighbours and fill the mask with a value of 255. The following additional options occupy higher
6581     * bits and therefore may be further combined with the connectivity and mask fill values using
6582     * bit-wise or (|), see #FloodFillFlags.
6583     *
6584     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6585     * pixel \((x+1, y+1)\) in the mask .
6586     *
6587     * SEE: findContours
6588     * @return automatically generated
6589     */
6590    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff, int flags) {
6591        double[] rect_out = new double[4];
6592        int retVal = floodFill_0(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3], flags);
6593        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6594        return retVal;
6595    }
6596
6597    /**
6598     * Fills a connected component with the given color.
6599     *
6600     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6601     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6602     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6603     *
6604     * <ul>
6605     *   <li>
6606     *  in case of a grayscale image and floating range
6607     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6608     *   </li>
6609     * </ul>
6610     *
6611     *
6612     * <ul>
6613     *   <li>
6614     *  in case of a grayscale image and fixed range
6615     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6616     *   </li>
6617     * </ul>
6618     *
6619     *
6620     * <ul>
6621     *   <li>
6622     *  in case of a color image and floating range
6623     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6624     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6625     * and
6626     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6627     *   </li>
6628     * </ul>
6629     *
6630     *
6631     * <ul>
6632     *   <li>
6633     *  in case of a color image and fixed range
6634     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6635     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6636     * and
6637     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6638     *   </li>
6639     * </ul>
6640     *
6641     *
6642     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6643     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6644     * be close enough to:
6645     * <ul>
6646     *   <li>
6647     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6648     * of a floating range.
6649     *   </li>
6650     *   <li>
6651     *  Color/brightness of the seed point in case of a fixed range.
6652     *   </li>
6653     * </ul>
6654     *
6655     * Use these functions to either mark a connected component with the specified color in-place, or build
6656     * a mask and then extract the contour, or copy the region to another image, and so on.
6657     *
6658     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6659     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6660     * the details below.
6661     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6662     * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
     * input and output parameter, you must take responsibility for initializing it.
6664     * Flood-filling cannot go across non-zero pixels in the input mask. For example,
6665     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6666     * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
6667     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6668     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6669     * to make sure the filled areas do not overlap.
6670     * @param seedPoint Starting point.
6671     * @param newVal New value of the repainted domain pixels.
6672     * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
6673     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6674     * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and
6675     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
6676     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6677     * repainted domain.
     * (In this variant {@code flags} defaults to 4: four-way connectivity with a
     * mask fill value of 1.)
6685     *
6686     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6687     * pixel \((x+1, y+1)\) in the mask .
6688     *
6689     * SEE: findContours
6690     * @return automatically generated
6691     */
6692    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff) {
6693        double[] rect_out = new double[4];
6694        int retVal = floodFill_1(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3]);
6695        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6696        return retVal;
6697    }
6698
6699    /**
6700     * Fills a connected component with the given color.
6701     *
6702     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6703     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6704     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6705     *
6706     * <ul>
6707     *   <li>
6708     *  in case of a grayscale image and floating range
6709     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6710     *   </li>
6711     * </ul>
6712     *
6713     *
6714     * <ul>
6715     *   <li>
6716     *  in case of a grayscale image and fixed range
6717     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6718     *   </li>
6719     * </ul>
6720     *
6721     *
6722     * <ul>
6723     *   <li>
6724     *  in case of a color image and floating range
6725     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6726     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6727     * and
6728     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6729     *   </li>
6730     * </ul>
6731     *
6732     *
6733     * <ul>
6734     *   <li>
6735     *  in case of a color image and fixed range
6736     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6737     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6738     * and
6739     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6740     *   </li>
6741     * </ul>
6742     *
6743     *
6744     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6745     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6746     * be close enough to:
6747     * <ul>
6748     *   <li>
6749     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6750     * of a floating range.
6751     *   </li>
6752     *   <li>
6753     *  Color/brightness of the seed point in case of a fixed range.
6754     *   </li>
6755     * </ul>
6756     *
6757     * Use these functions to either mark a connected component with the specified color in-place, or build
6758     * a mask and then extract the contour, or copy the region to another image, and so on.
6759     *
6760     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6761     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6762     * the details below.
6763     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6764     * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
     * input and output parameter, you must take responsibility for initializing it.
6766     * Flood-filling cannot go across non-zero pixels in the input mask. For example,
6767     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6768     * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
6769     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6770     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6771     * to make sure the filled areas do not overlap.
6772     * @param seedPoint Starting point.
6773     * @param newVal New value of the repainted domain pixels.
6774     * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and
6775     * one of its neighbors belonging to the component, or a seed pixel being added to the component.
     * (In this variant {@code upDiff} defaults to Scalar().)
6777     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6778     * repainted domain.
     * (In this variant {@code flags} defaults to 4: four-way connectivity with a
     * mask fill value of 1.)
6786     *
6787     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6788     * pixel \((x+1, y+1)\) in the mask .
6789     *
6790     * SEE: findContours
6791     * @return automatically generated
6792     */
6793    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff) {
6794        double[] rect_out = new double[4];
6795        int retVal = floodFill_2(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3]);
6796        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6797        return retVal;
6798    }
6799
6800    /**
6801     * Fills a connected component with the given color.
6802     *
6803     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6804     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6805     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6806     *
6807     * <ul>
6808     *   <li>
6809     *  in case of a grayscale image and floating range
6810     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6811     *   </li>
6812     * </ul>
6813     *
6814     *
6815     * <ul>
6816     *   <li>
6817     *  in case of a grayscale image and fixed range
6818     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6819     *   </li>
6820     * </ul>
6821     *
6822     *
6823     * <ul>
6824     *   <li>
6825     *  in case of a color image and floating range
6826     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6827     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6828     * and
6829     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6830     *   </li>
6831     * </ul>
6832     *
6833     *
6834     * <ul>
6835     *   <li>
6836     *  in case of a color image and fixed range
6837     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6838     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6839     * and
6840     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6841     *   </li>
6842     * </ul>
6843     *
6844     *
6845     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6846     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6847     * be close enough to:
6848     * <ul>
6849     *   <li>
6850     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6851     * of a floating range.
6852     *   </li>
6853     *   <li>
6854     *  Color/brightness of the seed point in case of a fixed range.
6855     *   </li>
6856     * </ul>
6857     *
6858     * Use these functions to either mark a connected component with the specified color in-place, or build
6859     * a mask and then extract the contour, or copy the region to another image, and so on.
6860     *
6861     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6862     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6863     * the details below.
6864     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6865     * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
     * input and output parameter, you must take responsibility for initializing it.
6867     * Flood-filling cannot go across non-zero pixels in the input mask. For example,
6868     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6869     * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
6870     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6871     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6872     * to make sure the filled areas do not overlap.
6873     * @param seedPoint Starting point.
6874     * @param newVal New value of the repainted domain pixels.
     * (In this variant {@code loDiff} and {@code upDiff} default to Scalar().)
6877     * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the
6878     * repainted domain.
     * (In this variant {@code flags} defaults to 4: four-way connectivity with a
     * mask fill value of 1.)
6886     *
6887     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6888     * pixel \((x+1, y+1)\) in the mask .
6889     *
6890     * SEE: findContours
6891     * @return automatically generated
6892     */
6893    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect) {
6894        double[] rect_out = new double[4];
6895        int retVal = floodFill_3(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out);
6896        if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 
6897        return retVal;
6898    }
6899
6900    /**
6901     * Fills a connected component with the given color.
6902     *
6903     * The function cv::floodFill fills a connected component starting from the seed point with the specified
6904     * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The
6905     * pixel at \((x,y)\) is considered to belong to the repainted domain if:
6906     *
6907     * <ul>
6908     *   <li>
6909     *  in case of a grayscale image and floating range
6910     * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} (x',y')+ \texttt{upDiff}\)
6911     *   </li>
6912     * </ul>
6913     *
6914     *
6915     * <ul>
6916     *   <li>
6917     *  in case of a grayscale image and fixed range
6918     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y)  \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\)
6919     *   </li>
6920     * </ul>
6921     *
6922     *
6923     * <ul>
6924     *   <li>
6925     *  in case of a color image and floating range
6926     * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\)
6927     * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\)
6928     * and
6929     * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\)
6930     *   </li>
6931     * </ul>
6932     *
6933     *
6934     * <ul>
6935     *   <li>
6936     *  in case of a color image and fixed range
6937     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\)
6938     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\)
6939     * and
6940     * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\)
6941     *   </li>
6942     * </ul>
6943     *
6944     *
6945     * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the
6946     * component. That is, to be added to the connected component, a color/brightness of the pixel should
6947     * be close enough to:
6948     * <ul>
6949     *   <li>
6950     *  Color/brightness of one of its neighbors that already belong to the connected component in case
6951     * of a floating range.
6952     *   </li>
6953     *   <li>
6954     *  Color/brightness of the seed point in case of a fixed range.
6955     *   </li>
6956     * </ul>
6957     *
6958     * Use these functions to either mark a connected component with the specified color in-place, or build
6959     * a mask and then extract the contour, or copy the region to another image, and so on.
6960     *
6961     * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the
6962     * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
6963     * the details below.
6964     * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
6965     * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
     * input and output parameter, you must take responsibility for initializing it.
6967     * Flood-filling cannot go across non-zero pixels in the input mask. For example,
6968     * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
6969     * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
6970     * as described below. Additionally, the function fills the border of the mask with ones to simplify
6971     * internal processing. It is therefore possible to use the same mask in multiple calls to the function
6972     * to make sure the filled areas do not overlap.
6973     * @param seedPoint Starting point.
6974     * @param newVal New value of the repainted domain pixels.
     * (In this variant {@code loDiff} and {@code upDiff} default to Scalar(), no
     * bounding rectangle is reported, and {@code flags} defaults to 4: four-way
     * connectivity with a mask fill value of 1.)
6985     *
6986     * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the
6987     * pixel \((x+1, y+1)\) in the mask .
6988     *
6989     * SEE: findContours
6990     * @return automatically generated
6991     */
6992    public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal) {
6993        return floodFill_4(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3]);
6994    }
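
    // Usage sketch (illustrative; "img" is a hypothetical 8-bit, 3-channel image and
    // the seed point, fill color, and tolerances are made-up values):
    //
    //   Mat mask = new Mat();                 // empty: sized and zeroed automatically
    //   Rect filled = new Rect();
    //   int flags = 4 | (255 << 8) | Imgproc.FLOODFILL_FIXED_RANGE;
    //   int area = Imgproc.floodFill(img, mask, new Point(10, 10),
    //           new Scalar(0, 0, 255),        // paint the component red (BGR)
    //           filled,                       // receives the bounding rectangle
    //           new Scalar(20, 20, 20), new Scalar(20, 20, 20), flags);
    //   // area is the number of repainted pixels.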
6995
6996
6997    //
6998    // C++:  void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst)
6999    //
7000
7001    /**
7002     *
7003     *
7004     * variant without {@code mask} parameter
7005     * @param src1 automatically generated
7006     * @param src2 automatically generated
7007     * @param weights1 automatically generated
7008     * @param weights2 automatically generated
7009     * @param dst automatically generated
7010     */
7011    public static void blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat dst) {
7012        blendLinear_0(src1.nativeObj, src2.nativeObj, weights1.nativeObj, weights2.nativeObj, dst.nativeObj);
7013    }
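
    // Usage sketch (illustrative only, not part of the generated bindings): blends two float
    // images with per-pixel weights, dst = (src1*weights1 + src2*weights2) / (weights1 + weights2).
    // Assumes an additional import of org.opencv.core.CvType.
    //
    //     Mat a = Mat.zeros(4, 4, CvType.CV_32F);
    //     Mat b = Mat.ones(4, 4, CvType.CV_32F);
    //     Mat w1 = new Mat(4, 4, CvType.CV_32F, new Scalar(0.25));
    //     Mat w2 = new Mat(4, 4, CvType.CV_32F, new Scalar(0.75));
    //     Mat dst = new Mat();
    //     Imgproc.blendLinear(a, b, w1, w2, dst);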
7014
7015
7016    //
7017    // C++:  void cv::cvtColor(Mat src, Mat& dst, int code, int dstCn = 0)
7018    //
7019
7020    /**
7021     * Converts an image from one color space to another.
7022     *
7023     * The function converts an input image from one color space to another. In case of a transformation
     * to/from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
7025     * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
7026     * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
7027     * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
7028     * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
7029     *
7030     * The conventional ranges for R, G, and B channel values are:
7031     * <ul>
7032     *   <li>
7033     *    0 to 255 for CV_8U images
7034     *   </li>
7035     *   <li>
7036     *    0 to 65535 for CV_16U images
7037     *   </li>
7038     *   <li>
7039     *    0 to 1 for CV_32F images
7040     *   </li>
7041     * </ul>
7042     *
7043     * In case of linear transformations, the range does not matter. But in case of a non-linear
7044     * transformation, an input RGB image should be normalized to the proper value range to get the correct
     * results, such as for the RGB \(\rightarrow\) L\*u\*v\* transformation. For example, if you have a
7046     * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
7047     * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,
7048     * you need first to scale the image down:
7049     * <code>
7050     *     img *= 1./255;
7051     *     cvtColor(img, img, COLOR_BGR2Luv);
7052     * </code>
     * If you use #cvtColor with 8-bit images, some information will be lost in the conversion. For many
7054     * applications, this will not be noticeable but it is recommended to use 32-bit images in applications
7055     * that need the full range of colors or that convert an image before an operation and then convert
7056     * back.
7057     *
     * If the conversion adds the alpha channel, its value will be set to the maximum of the corresponding channel
7059     * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
7060     *
7061     * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
7062     * floating-point.
7063     * @param dst output image of the same size and depth as src.
7064     * @param code color space conversion code (see #ColorConversionCodes).
7065     * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
7066     * channels is derived automatically from src and code.
7067     *
7068     * SEE: REF: imgproc_color_conversions
7069     */
7070    public static void cvtColor(Mat src, Mat dst, int code, int dstCn) {
7071        cvtColor_0(src.nativeObj, dst.nativeObj, code, dstCn);
7072    }
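
    // Usage sketch (illustrative only, not part of the generated bindings): as described above, a
    // float image must be scaled to the 0..1 range before a non-linear conversion such as
    // BGR -> Luv. Assumes an additional import of org.opencv.core.CvType.
    //
    //     Mat bgr8 = Mat.zeros(64, 64, CvType.CV_8UC3);
    //     Mat bgr32f = new Mat();
    //     bgr8.convertTo(bgr32f, CvType.CV_32FC3, 1.0 / 255);  // scale down first
    //     Mat luv = new Mat();
    //     Imgproc.cvtColor(bgr32f, luv, Imgproc.COLOR_BGR2Luv);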
7073
7074    /**
7075     * Converts an image from one color space to another.
7076     *
7077     * The function converts an input image from one color space to another. In case of a transformation
     * to/from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note
7079     * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the
7080     * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue
7081     * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and
7082     * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.
7083     *
7084     * The conventional ranges for R, G, and B channel values are:
7085     * <ul>
7086     *   <li>
7087     *    0 to 255 for CV_8U images
7088     *   </li>
7089     *   <li>
7090     *    0 to 65535 for CV_16U images
7091     *   </li>
7092     *   <li>
7093     *    0 to 1 for CV_32F images
7094     *   </li>
7095     * </ul>
7096     *
7097     * In case of linear transformations, the range does not matter. But in case of a non-linear
7098     * transformation, an input RGB image should be normalized to the proper value range to get the correct
     * results, such as for the RGB \(\rightarrow\) L\*u\*v\* transformation. For example, if you have a
7100     * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will
7101     * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,
7102     * you need first to scale the image down:
7103     * <code>
7104     *     img *= 1./255;
7105     *     cvtColor(img, img, COLOR_BGR2Luv);
7106     * </code>
     * If you use #cvtColor with 8-bit images, some information will be lost in the conversion. For many
7108     * applications, this will not be noticeable but it is recommended to use 32-bit images in applications
7109     * that need the full range of colors or that convert an image before an operation and then convert
7110     * back.
7111     *
     * If the conversion adds the alpha channel, its value will be set to the maximum of the corresponding channel
7113     * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.
7114     *
7115     * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision
7116     * floating-point.
7117     * @param dst output image of the same size and depth as src.
7118     * @param code color space conversion code (see #ColorConversionCodes).
     * In this variant, the number of channels in the destination image is derived automatically from src and code.
7120     *
7121     * SEE: REF: imgproc_color_conversions
7122     */
7123    public static void cvtColor(Mat src, Mat dst, int code) {
7124        cvtColor_1(src.nativeObj, dst.nativeObj, code);
7125    }
7126
7127
7128    //
7129    // C++:  void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code)
7130    //
7131
7132    /**
7133     * Converts an image from one color space to another where the source image is
7134     * stored in two planes.
7135     *
     * Currently, this function only supports YUV420 to RGB conversion.
7137     *
7138     * <ul>
7139     *   <li>
7140     *  #COLOR_YUV2BGR_NV12
7141     *   </li>
7142     *   <li>
7143     *  #COLOR_YUV2RGB_NV12
7144     *   </li>
7145     *   <li>
7146     *  #COLOR_YUV2BGRA_NV12
7147     *   </li>
7148     *   <li>
7149     *  #COLOR_YUV2RGBA_NV12
7150     *   </li>
7151     *   <li>
7152     *  #COLOR_YUV2BGR_NV21
7153     *   </li>
7154     *   <li>
7155     *  #COLOR_YUV2RGB_NV21
7156     *   </li>
7157     *   <li>
7158     *  #COLOR_YUV2BGRA_NV21
7159     *   </li>
7160     *   <li>
7161     *  #COLOR_YUV2RGBA_NV21
7162     *   </li>
7163     * </ul>
7164     * @param src1 automatically generated
7165     * @param src2 automatically generated
7166     * @param dst automatically generated
7167     * @param code automatically generated
7168     */
7169    public static void cvtColorTwoPlane(Mat src1, Mat src2, Mat dst, int code) {
7170        cvtColorTwoPlane_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, code);
7171    }
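
    // Usage sketch (illustrative only, not part of the generated bindings): converts an NV21 frame
    // stored as a full-resolution luma plane plus a half-resolution interleaved chroma plane, as
    // delivered by e.g. an Android camera. The plane sizes below are an assumption for NV21; assumes
    // an additional import of org.opencv.core.CvType.
    //
    //     int w = 640, h = 480;
    //     Mat y = Mat.zeros(h, w, CvType.CV_8UC1);           // luma plane
    //     Mat uv = Mat.zeros(h / 2, w / 2, CvType.CV_8UC2);  // interleaved chroma plane
    //     Mat rgba = new Mat();
    //     Imgproc.cvtColorTwoPlane(y, uv, rgba, Imgproc.COLOR_YUV2RGBA_NV21);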
7172
7173
7174    //
7175    // C++:  void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0)
7176    //
7177
7178    /**
7179     * main function for all demosaicing processes
7180     *
7181     * @param src input image: 8-bit unsigned or 16-bit unsigned.
7182     * @param dst output image of the same size and depth as src.
7183     * @param code Color space conversion code (see the description below).
7184     * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the
7185     * channels is derived automatically from src and code.
7186     *
7187     * The function can do the following transformations:
7188     *
7189     * <ul>
7190     *   <li>
7191     *    Demosaicing using bilinear interpolation
7192     *   </li>
7193     * </ul>
7194     *
7195     *     #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR
7196     *
7197     *     #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY
7198     *
7199     * <ul>
7200     *   <li>
7201     *    Demosaicing using Variable Number of Gradients.
7202     *   </li>
7203     * </ul>
7204     *
7205     *     #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG
7206     *
7207     * <ul>
7208     *   <li>
7209     *    Edge-Aware Demosaicing.
7210     *   </li>
7211     * </ul>
7212     *
7213     *     #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA
7214     *
7215     * <ul>
7216     *   <li>
7217     *    Demosaicing with alpha channel
7218     *   </li>
7219     * </ul>
7220     *
7221     *     #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA
7222     *
7223     * SEE: cvtColor
7224     */
7225    public static void demosaicing(Mat src, Mat dst, int code, int dstCn) {
7226        demosaicing_0(src.nativeObj, dst.nativeObj, code, dstCn);
7227    }
7228
7229    /**
7230     * main function for all demosaicing processes
7231     *
7232     * @param src input image: 8-bit unsigned or 16-bit unsigned.
7233     * @param dst output image of the same size and depth as src.
7234     * @param code Color space conversion code (see the description below).
     * In this variant, the number of channels in the destination image is derived automatically from src and code.
7236     *
7237     * The function can do the following transformations:
7238     *
7239     * <ul>
7240     *   <li>
7241     *    Demosaicing using bilinear interpolation
7242     *   </li>
7243     * </ul>
7244     *
7245     *     #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR
7246     *
7247     *     #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY
7248     *
7249     * <ul>
7250     *   <li>
7251     *    Demosaicing using Variable Number of Gradients.
7252     *   </li>
7253     * </ul>
7254     *
7255     *     #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG
7256     *
7257     * <ul>
7258     *   <li>
7259     *    Edge-Aware Demosaicing.
7260     *   </li>
7261     * </ul>
7262     *
7263     *     #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA
7264     *
7265     * <ul>
7266     *   <li>
7267     *    Demosaicing with alpha channel
7268     *   </li>
7269     * </ul>
7270     *
7271     *     #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA
7272     *
7273     * SEE: cvtColor
7274     */
7275    public static void demosaicing(Mat src, Mat dst, int code) {
7276        demosaicing_1(src.nativeObj, dst.nativeObj, code);
7277    }
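
    // Usage sketch (illustrative only, not part of the generated bindings): reconstructs a BGR
    // image from a raw single-channel Bayer mosaic using bilinear demosaicing. Assumes an
    // additional import of org.opencv.core.CvType; the input here is a placeholder.
    //
    //     Mat bayer = Mat.zeros(480, 640, CvType.CV_8UC1);   // raw sensor data, BG pattern
    //     Mat bgr = new Mat();
    //     Imgproc.demosaicing(bayer, bgr, Imgproc.COLOR_BayerBG2BGR);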
7278
7279
7280    //
7281    // C++:  Moments cv::moments(Mat array, bool binaryImage = false)
7282    //
7283
7284    /**
7285     * Calculates all of the moments up to the third order of a polygon or rasterized shape.
7286     *
7287     * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
7288     * results are returned in the structure cv::Moments.
7289     *
7290     * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
7291     * \(1 \times N\) or \(N \times 1\) ) of 2D points (Point or Point2f ).
7292     * @param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is
7293     * used for images only.
7294     * @return moments.
7295     *
7296     * <b>Note:</b> Only applicable to contour moments calculations from Python bindings: Note that the numpy
7297     * type for the input array should be either np.int32 or np.float32.
7298     *
7299     * SEE:  contourArea, arcLength
7300     */
7301    public static Moments moments(Mat array, boolean binaryImage) {
7302        return new Moments(moments_0(array.nativeObj, binaryImage));
7303    }
7304
7305    /**
7306     * Calculates all of the moments up to the third order of a polygon or rasterized shape.
7307     *
7308     * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The
7309     * results are returned in the structure cv::Moments.
7310     *
7311     * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (
7312     * \(1 \times N\) or \(N \times 1\) ) of 2D points (Point or Point2f ).
     * In this variant, binaryImage is false, so image pixel values are used as-is.
7314     * @return moments.
7315     *
7316     * <b>Note:</b> Only applicable to contour moments calculations from Python bindings: Note that the numpy
7317     * type for the input array should be either np.int32 or np.float32.
7318     *
7319     * SEE:  contourArea, arcLength
7320     */
7321    public static Moments moments(Mat array) {
7322        return new Moments(moments_1(array.nativeObj));
7323    }
7324
7325
7326    //
7327    // C++:  void cv::HuMoments(Moments m, Mat& hu)
7328    //
7329
7330    public static void HuMoments(Moments m, Mat hu) {
7331        HuMoments_0(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03, hu.nativeObj);
7332    }
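
    // Usage sketch (illustrative only, not part of the generated bindings): computes the centroid
    // of a contour from its spatial moments, then the seven Hu invariants.
    //
    //     MatOfPoint contour = new MatOfPoint(
    //             new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(0, 10));
    //     Moments m = Imgproc.moments(contour);
    //     double cx = m.m10 / m.m00, cy = m.m01 / m.m00;  // centroid
    //     Mat hu = new Mat();
    //     Imgproc.HuMoments(m, hu);                       // the seven Hu invariants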
7333
7334
7335    //
7336    // C++:  void cv::matchTemplate(Mat image, Mat templ, Mat& result, int method, Mat mask = Mat())
7337    //
7338
7339    /**
7340     * Compares a template against overlapped image regions.
7341     *
7342     * The function slides through image , compares the overlapped patches of size \(w \times h\) against
7343     * templ using the specified method and stores the comparison results in result . #TemplateMatchModes
7344     * describes the formulae for the available comparison methods ( \(I\) denotes image, \(T\)
7345     * template, \(R\) result, \(M\) the optional mask ). The summation is done over template and/or
7346     * the image patch: \(x' = 0...w-1, y' = 0...h-1\)
7347     *
7348     * After the function finishes the comparison, the best matches can be found as global minimums (when
7349     * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the
7350     * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
7351     * the denominator is done over all of the channels and separate mean values are used for each channel.
7352     * That is, the function can take a color template and a color image. The result will still be a
7353     * single-channel image, which is easier to analyze.
7354     *
7355     * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
     * @param templ Searched template. It must not be larger than the source image and must have the
     * same data type.
7358     * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
7359     * is \(W \times H\) and templ is \(w \times h\) , then result is \((W-w+1) \times (H-h+1)\) .
7360     * @param method Parameter specifying the comparison method, see #TemplateMatchModes
7361     * @param mask Optional mask. It must have the same size as templ. It must either have the same number
7362     *             of channels as template or only one channel, which is then used for all template and
7363     *             image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask,
7364     *             meaning only elements where mask is nonzero are used and are kept unchanged independent
     *             of the actual mask value (weight equals 1). For data type #CV_32F, the mask values are
7366     *             used as weights. The exact formulas are documented in #TemplateMatchModes.
7367     */
7368    public static void matchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask) {
7369        matchTemplate_0(image.nativeObj, templ.nativeObj, result.nativeObj, method, mask.nativeObj);
7370    }
7371
7372    /**
7373     * Compares a template against overlapped image regions.
7374     *
7375     * The function slides through image , compares the overlapped patches of size \(w \times h\) against
7376     * templ using the specified method and stores the comparison results in result . #TemplateMatchModes
7377     * describes the formulae for the available comparison methods ( \(I\) denotes image, \(T\)
7378     * template, \(R\) result, \(M\) the optional mask ). The summation is done over template and/or
7379     * the image patch: \(x' = 0...w-1, y' = 0...h-1\)
7380     *
7381     * After the function finishes the comparison, the best matches can be found as global minimums (when
7382     * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the
7383     * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in
7384     * the denominator is done over all of the channels and separate mean values are used for each channel.
7385     * That is, the function can take a color template and a color image. The result will still be a
7386     * single-channel image, which is easier to analyze.
7387     *
7388     * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.
     * @param templ Searched template. It must not be larger than the source image and must have the
     * same data type.
7391     * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image
7392     * is \(W \times H\) and templ is \(w \times h\) , then result is \((W-w+1) \times (H-h+1)\) .
7393     * @param method Parameter specifying the comparison method, see #TemplateMatchModes
     * This variant applies no mask; see the overload above for details on masked template matching.
7399     */
7400    public static void matchTemplate(Mat image, Mat templ, Mat result, int method) {
7401        matchTemplate_1(image.nativeObj, templ.nativeObj, result.nativeObj, method);
7402    }
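
    // Usage sketch (illustrative only, not part of the generated bindings): locates the best match
    // with Core.minMaxLoc, as described above. Assumes additional imports of org.opencv.core.Core
    // and org.opencv.core.CvType; the Mats here are placeholders.
    //
    //     Mat image = Mat.zeros(200, 200, CvType.CV_8UC1);
    //     Mat templ = Mat.zeros(20, 20, CvType.CV_8UC1);
    //     Mat result = new Mat();
    //     Imgproc.matchTemplate(image, templ, result, Imgproc.TM_CCOEFF_NORMED);
    //     Core.MinMaxLocResult mm = Core.minMaxLoc(result);
    //     Point topLeft = mm.maxLoc;  // use mm.minLoc for TM_SQDIFF-based methods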
7403
7404
7405    //
7406    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype)
7407    //
7408
7409    /**
     * Computes the connected components labeled image of a boolean image.
     *
     * For an image with 4- or 8-way connectivity, the function returns N, the total number of labels
     * [0, N-1], where 0 represents the background label. ltype specifies the output label image type, an
     * important consideration based on the total number of labels or, alternatively, the total number of
     * pixels in the source image. ccltype specifies the connected components labeling algorithm to use;
     * currently the Bolelli (Spaghetti) CITE: Bolelli2019, Grana (BBDT) CITE: Grana2010 and Wu (SAUF) CITE: Wu2009
     * algorithms are supported, see #ConnectedComponentsAlgorithmsTypes for details. Note that the SAUF
     * algorithm forces a row-major ordering of labels while Spaghetti and BBDT do not.
     * This function uses a parallel version of the algorithms if at least one allowed parallel
     * framework is enabled and the rows of the image are at least twice the number returned by #getNumberOfCPUs.
7421     *
7422     * @param image the 8-bit single-channel image to be labeled
7423     * @param labels destination labeled image
7424     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7425     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7426     * @param ccltype connected components algorithm type (see the #ConnectedComponentsAlgorithmsTypes).
7427     * @return automatically generated
7428     */
7429    public static int connectedComponentsWithAlgorithm(Mat image, Mat labels, int connectivity, int ltype, int ccltype) {
7430        return connectedComponentsWithAlgorithm_0(image.nativeObj, labels.nativeObj, connectivity, ltype, ccltype);
7431    }
7432
7433
7434    //
7435    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S)
7436    //
7437
7438    /**
7439     *
7440     *
7441     * @param image the 8-bit single-channel image to be labeled
7442     * @param labels destination labeled image
7443     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7444     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7445     * @return automatically generated
7446     */
7447    public static int connectedComponents(Mat image, Mat labels, int connectivity, int ltype) {
7448        return connectedComponents_0(image.nativeObj, labels.nativeObj, connectivity, ltype);
7449    }
7450
7451    /**
7452     *
7453     *
7454     * @param image the 8-bit single-channel image to be labeled
7455     * @param labels destination labeled image
7456     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7457     * @return automatically generated
7458     */
7459    public static int connectedComponents(Mat image, Mat labels, int connectivity) {
7460        return connectedComponents_1(image.nativeObj, labels.nativeObj, connectivity);
7461    }
7462
7463    /**
7464     *
7465     *
7466     * @param image the 8-bit single-channel image to be labeled
7467     * @param labels destination labeled image
7468     * @return automatically generated
7469     */
7470    public static int connectedComponents(Mat image, Mat labels) {
7471        return connectedComponents_2(image.nativeObj, labels.nativeObj);
7472    }
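
    // Usage sketch (illustrative only, not part of the generated bindings): labels the connected
    // components of a binary image with the default 8-way connectivity. Assumes an additional
    // import of org.opencv.core.CvType.
    //
    //     Mat binary = Mat.zeros(100, 100, CvType.CV_8UC1);     // non-zero pixels are foreground
    //     Mat labels = new Mat();
    //     int n = Imgproc.connectedComponents(binary, labels);  // labels run 0..n-1, 0 = background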
7473
7474
7475    //
7476    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, int ccltype)
7477    //
7478
7479    /**
     * Computes the connected components labeled image of a boolean image and also produces a
     * statistics output for each label.
     *
     * For an image with 4- or 8-way connectivity, the function returns N, the total number of labels
     * [0, N-1], where 0 represents the background label. ltype specifies the output label image type, an
     * important consideration based on the total number of labels or, alternatively, the total number of
     * pixels in the source image. ccltype specifies the connected components labeling algorithm to use;
     * currently the Bolelli (Spaghetti) CITE: Bolelli2019, Grana (BBDT) CITE: Grana2010 and Wu (SAUF) CITE: Wu2009
     * algorithms are supported, see #ConnectedComponentsAlgorithmsTypes for details. Note that the SAUF
     * algorithm forces a row-major ordering of labels while Spaghetti and BBDT do not.
     * This function uses a parallel version of the algorithms (statistics included) if at least one allowed
     * parallel framework is enabled and the rows of the image are at least twice the number returned by #getNumberOfCPUs.
7491     *
7492     * @param image the 8-bit single-channel image to be labeled
7493     * @param labels destination labeled image
7494     * @param stats statistics output for each label, including the background label.
7495     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7496     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7497     * @param centroids centroid output for each label, including the background label. Centroids are
     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type is CV_64F.
7499     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7500     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7501     * @param ccltype connected components algorithm type (see #ConnectedComponentsAlgorithmsTypes).
7502     * @return automatically generated
7503     */
7504    public static int connectedComponentsWithStatsWithAlgorithm(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype, int ccltype) {
7505        return connectedComponentsWithStatsWithAlgorithm_0(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity, ltype, ccltype);
7506    }
7507
7508
7509    //
7510    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S)
7511    //
7512
7513    /**
7514     *
7515     * @param image the 8-bit single-channel image to be labeled
7516     * @param labels destination labeled image
7517     * @param stats statistics output for each label, including the background label.
7518     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7519     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7520     * @param centroids centroid output for each label, including the background label. Centroids are
     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type is CV_64F.
7522     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7523     * @param ltype output image label type. Currently CV_32S and CV_16U are supported.
7524     * @return automatically generated
7525     */
7526    public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype) {
7527        return connectedComponentsWithStats_0(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity, ltype);
7528    }
7529
7530    /**
7531     *
7532     * @param image the 8-bit single-channel image to be labeled
7533     * @param labels destination labeled image
7534     * @param stats statistics output for each label, including the background label.
7535     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7536     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7537     * @param centroids centroid output for each label, including the background label. Centroids are
     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type is CV_64F.
7539     * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
7540     * @return automatically generated
7541     */
7542    public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity) {
7543        return connectedComponentsWithStats_1(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity);
7544    }
7545
7546    /**
7547     *
7548     * @param image the 8-bit single-channel image to be labeled
7549     * @param labels destination labeled image
7550     * @param stats statistics output for each label, including the background label.
7551     * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
7552     * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
7553     * @param centroids centroid output for each label, including the background label. Centroids are
     * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type is CV_64F.
7555     * @return automatically generated
7556     */
7557    public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids) {
7558        return connectedComponentsWithStats_2(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj);
7559    }
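
    // Usage sketch (illustrative only, not part of the generated bindings): reads the per-label
    // area and centroid out of the stats/centroids matrices, skipping the background label 0.
    // Assumes an additional import of org.opencv.core.CvType.
    //
    //     Mat binary = Mat.zeros(100, 100, CvType.CV_8UC1);
    //     Mat labels = new Mat(), stats = new Mat(), centroids = new Mat();
    //     int n = Imgproc.connectedComponentsWithStats(binary, labels, stats, centroids);
    //     for (int label = 1; label < n; label++) {
    //         double area = stats.get(label, Imgproc.CC_STAT_AREA)[0];
    //         double cx = centroids.get(label, 0)[0];
    //         double cy = centroids.get(label, 1)[0];
    //     }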
7560
7561
7562    //
7563    // C++:  void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point())
7564    //
7565
7566    /**
7567     * Finds contours in a binary image.
7568     *
7569     * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours
7570     * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
7571     * OpenCV sample directory.
     * <b>Note:</b> Since OpenCV 3.2, the source image is not modified by this function.
7573     *
7574     * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
7575     * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
7576     * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
     * If mode equals #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
7578     * @param contours Detected contours. Each contour is stored as a vector of points (e.g.
7579     * std::vector&lt;std::vector&lt;cv::Point&gt; &gt;).
7580     * @param hierarchy Optional output vector (e.g. std::vector&lt;cv::Vec4i&gt;), containing information about the image topology. It has
7581     * as many elements as the number of contours. For each i-th contour contours[i], the elements
7582     * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
7583     * in contours of the next and previous contours at the same hierarchical level, the first child
7584     * contour and the parent contour, respectively. If for the contour i there are no next, previous,
7585     * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
7586     * <b>Note:</b> In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour.
7587     * @param mode Contour retrieval mode, see #RetrievalModes
7588     * @param method Contour approximation method, see #ContourApproximationModes
7589     * @param offset Optional offset by which every contour point is shifted. This is useful if the
7590     * contours are extracted from the image ROI and then they should be analyzed in the whole image
7591     * context.
7592     */
7593    public static void findContours(Mat image, List<MatOfPoint> contours, Mat hierarchy, int mode, int method, Point offset) {
7594        Mat contours_mat = new Mat();
7595        findContours_0(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method, offset.x, offset.y);
7596        Converters.Mat_to_vector_vector_Point(contours_mat, contours);
7597        contours_mat.release();
7598    }
7599
7600    /**
7601     * Finds contours in a binary image.
7602     *
7603     * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours
7604     * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
7605     * OpenCV sample directory.
     * <b>Note:</b> Since OpenCV 3.2, the source image is not modified by this function.
7607     *
7608     * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
7609     * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
7610     * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
     * If mode equals #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
7612     * @param contours Detected contours. Each contour is stored as a vector of points (e.g.
7613     * std::vector&lt;std::vector&lt;cv::Point&gt; &gt;).
7614     * @param hierarchy Optional output vector (e.g. std::vector&lt;cv::Vec4i&gt;), containing information about the image topology. It has
7615     * as many elements as the number of contours. For each i-th contour contours[i], the elements
7616     * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
7617     * in contours of the next and previous contours at the same hierarchical level, the first child
7618     * contour and the parent contour, respectively. If for the contour i there are no next, previous,
7619     * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
7620     * <b>Note:</b> In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour.
7621     * @param mode Contour retrieval mode, see #RetrievalModes
7622     * @param method Contour approximation method, see #ContourApproximationModes
     * In this variant, the contour points are not shifted (zero offset).
7625     */
7626    public static void findContours(Mat image, List<MatOfPoint> contours, Mat hierarchy, int mode, int method) {
7627        Mat contours_mat = new Mat();
7628        findContours_1(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method);
7629        Converters.Mat_to_vector_vector_Point(contours_mat, contours);
7630        contours_mat.release();
7631    }
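
    // Usage sketch (illustrative only, not part of the generated bindings): thresholds a grayscale
    // image to obtain a binary image, then retrieves the outer contours. Assumes an additional
    // import of org.opencv.core.CvType; the input here is a placeholder.
    //
    //     Mat gray = Mat.zeros(100, 100, CvType.CV_8UC1);
    //     Mat binary = new Mat();
    //     Imgproc.threshold(gray, binary, 128, 255, Imgproc.THRESH_BINARY);
    //     List<MatOfPoint> contours = new ArrayList<>();
    //     Mat hierarchy = new Mat();
    //     Imgproc.findContours(binary, contours, hierarchy,
    //             Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);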
7632
7633
7634    //
7635    // C++:  void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
7636    //
7637
7638    /**
7639     * Approximates a polygonal curve(s) with the specified precision.
7640     *
     * The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with fewer
     * vertices so that the distance between them is less than or equal to the specified precision. It uses the
7643     * Douglas-Peucker algorithm &lt;http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm&gt;
7644     *
     * @param curve Input vector of 2D points, stored in std::vector or Mat
7646     * @param approxCurve Result of the approximation. The type should match the type of the input curve.
7647     * @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
7648     * between the original curve and its approximation.
7649     * @param closed If true, the approximated curve is closed (its first and last vertices are
7650     * connected). Otherwise, it is not closed.
7651     */
7652    public static void approxPolyDP(MatOfPoint2f curve, MatOfPoint2f approxCurve, double epsilon, boolean closed) {
7653        Mat curve_mat = curve;
7654        Mat approxCurve_mat = approxCurve;
7655        approxPolyDP_0(curve_mat.nativeObj, approxCurve_mat.nativeObj, epsilon, closed);
7656    }
7657
7658
7659    //
7660    // C++:  double cv::arcLength(vector_Point2f curve, bool closed)
7661    //
7662
7663    /**
7664     * Calculates a contour perimeter or a curve length.
7665     *
7666     * The function computes a curve length or a closed contour perimeter.
7667     *
7668     * @param curve Input vector of 2D points, stored in std::vector or Mat.
7669     * @param closed Flag indicating whether the curve is closed or not.
7670     * @return automatically generated
7671     */
7672    public static double arcLength(MatOfPoint2f curve, boolean closed) {
7673        Mat curve_mat = curve;
7674        return arcLength_0(curve_mat.nativeObj, closed);
7675    }
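
    // Usage sketch (illustrative only, not part of the generated bindings): a common pattern is to
    // derive the approxPolyDP tolerance from the contour perimeter returned by arcLength.
    //
    //     MatOfPoint2f curve = new MatOfPoint2f(
    //             new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 4));
    //     double perimeter = Imgproc.arcLength(curve, true);
    //     MatOfPoint2f approx = new MatOfPoint2f();
    //     Imgproc.approxPolyDP(curve, approx, 0.02 * perimeter, true);  // epsilon = 2% of perimeter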
7676
7677
7678    //
7679    // C++:  Rect cv::boundingRect(Mat array)
7680    //
7681
7682    /**
7683     * Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
7684     *
7685     * The function calculates and returns the minimal up-right bounding rectangle for the specified point set or
7686     * non-zero pixels of gray-scale image.
7687     *
7688     * @param array Input gray-scale image or 2D point set, stored in std::vector or Mat.
7689     * @return automatically generated
7690     */
7691    public static Rect boundingRect(Mat array) {
7692        return new Rect(boundingRect_0(array.nativeObj));
7693    }
7694
7695
7696    //
7697    // C++:  double cv::contourArea(Mat contour, bool oriented = false)
7698    //
7699
7700    /**
7701     * Calculates a contour area.
7702     *
7703     * The function computes a contour area. Similarly to moments , the area is computed using the Green
7704     * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
     * #drawContours or #fillPoly , can be different. Also, the function will most certainly give wrong
     * results for contours with self-intersections.
7707     *
7708     * Example:
7709     * <code>
7710     *     vector&lt;Point&gt; contour;
     *     contour.push_back(Point(0, 0));
     *     contour.push_back(Point(10, 0));
     *     contour.push_back(Point(10, 10));
     *     contour.push_back(Point(5, 4));
7715     *
7716     *     double area0 = contourArea(contour);
7717     *     vector&lt;Point&gt; approx;
7718     *     approxPolyDP(contour, approx, 5, true);
7719     *     double area1 = contourArea(approx);
7720     *
7721     *     cout &lt;&lt; "area0 =" &lt;&lt; area0 &lt;&lt; endl &lt;&lt;
7722     *             "area1 =" &lt;&lt; area1 &lt;&lt; endl &lt;&lt;
7723     *             "approx poly vertices" &lt;&lt; approx.size() &lt;&lt; endl;
7724     * </code>
7725     * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
7726     * @param oriented Oriented area flag. If it is true, the function returns a signed area value,
7727     * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
7728     * determine orientation of a contour by taking the sign of an area. By default, the parameter is
7729     * false, which means that the absolute value is returned.
7730     * @return automatically generated
7731     */
7732    public static double contourArea(Mat contour, boolean oriented) {
7733        return contourArea_0(contour.nativeObj, oriented);
7734    }
7735
7736    /**
7737     * Calculates a contour area.
7738     *
7739     * The function computes a contour area. Similarly to moments , the area is computed using the Green
7740     * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
     * #drawContours or #fillPoly , can be different. Also, the function will most certainly give wrong
     * results for contours with self-intersections.
7743     *
7744     * Example:
7745     * <code>
7746     *     vector&lt;Point&gt; contour;
     *     contour.push_back(Point(0, 0));
     *     contour.push_back(Point(10, 0));
     *     contour.push_back(Point(10, 10));
     *     contour.push_back(Point(5, 4));
7751     *
7752     *     double area0 = contourArea(contour);
7753     *     vector&lt;Point&gt; approx;
7754     *     approxPolyDP(contour, approx, 5, true);
7755     *     double area1 = contourArea(approx);
7756     *
7757     *     cout &lt;&lt; "area0 =" &lt;&lt; area0 &lt;&lt; endl &lt;&lt;
7758     *             "area1 =" &lt;&lt; area1 &lt;&lt; endl &lt;&lt;
7759     *             "approx poly vertices" &lt;&lt; approx.size() &lt;&lt; endl;
7760     * </code>
7761     * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
     * In this variant, oriented is false, so the absolute value of the area is returned.
7765     * @return automatically generated
7766     */
7767    public static double contourArea(Mat contour) {
7768        return contourArea_1(contour.nativeObj);
7769    }
7770
7771
7772    //
7773    // C++:  RotatedRect cv::minAreaRect(vector_Point2f points)
7774    //
7775
7776    /**
7777     * Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
7778     *
     * The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a
     * specified point set. Developers should keep in mind that the returned RotatedRect can contain negative
     * indices when the data points are close to the boundary of the containing Mat element.
7782     *
7783     * @param points Input vector of 2D points, stored in std::vector&lt;&gt; or Mat
7784     * @return automatically generated
7785     */
7786    public static RotatedRect minAreaRect(MatOfPoint2f points) {
7787        Mat points_mat = points;
7788        return new RotatedRect(minAreaRect_0(points_mat.nativeObj));
7789    }
7790
7791
7792    //
7793    // C++:  void cv::boxPoints(RotatedRect box, Mat& points)
7794    //
7795
7796    /**
7797     * Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.
7798     *
7799     * The function finds the four vertices of a rotated rectangle. This function is useful to draw the
     * rectangle. In C++, instead of using this function, you can directly use the RotatedRect::points method. Please
7801     * visit the REF: tutorial_bounding_rotated_ellipses "tutorial on Creating Bounding rotated boxes and ellipses for contours" for more information.
7802     *
7803     * @param box The input rotated rectangle. It may be the output of REF: minAreaRect.
7804     * @param points The output array of four vertices of rectangles.
7805     */
7806    public static void boxPoints(RotatedRect box, Mat points) {
7807        boxPoints_0(box.center.x, box.center.y, box.size.width, box.size.height, box.angle, points.nativeObj);
7808    }
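
    // Usage sketch (illustrative only, not part of the generated bindings): computes the
    // minimum-area rotated rectangle of a point set and extracts its four corners.
    //
    //     MatOfPoint2f pts = new MatOfPoint2f(
    //             new Point(1, 1), new Point(5, 1), new Point(5, 3), new Point(1, 3));
    //     RotatedRect box = Imgproc.minAreaRect(pts);
    //     Mat corners = new Mat();
    //     Imgproc.boxPoints(box, corners);  // 4x2 CV_32F matrix of corner coordinates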
7809
7810
7811    //
7812    // C++:  void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius)
7813    //
7814
7815    /**
7816     * Finds a circle of the minimum area enclosing a 2D point set.
7817     *
7818     * The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm.
7819     *
7820     * @param points Input vector of 2D points, stored in std::vector&lt;&gt; or Mat
7821     * @param center Output center of the circle.
7822     * @param radius Output radius of the circle.
7823     */
7824    public static void minEnclosingCircle(MatOfPoint2f points, Point center, float[] radius) {
7825        Mat points_mat = points;
7826        double[] center_out = new double[2];
7827        double[] radius_out = new double[1];
7828        minEnclosingCircle_0(points_mat.nativeObj, center_out, radius_out);
7829        if(center!=null){ center.x = center_out[0]; center.y = center_out[1]; } 
7830        if(radius!=null) radius[0] = (float)radius_out[0];
7831    }
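
    // Usage sketch (illustrative only, not part of the generated bindings): note that the center
    // and radius are returned through the output arguments.
    //
    //     MatOfPoint2f pts = new MatOfPoint2f(
    //             new Point(0, 0), new Point(4, 0), new Point(2, 3));
    //     Point center = new Point();
    //     float[] radius = new float[1];
    //     Imgproc.minEnclosingCircle(pts, center, radius);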
7832
7833
7834    //
7835    // C++:  double cv::minEnclosingTriangle(Mat points, Mat& triangle)
7836    //
7837
7838    /**
7839     * Finds a triangle of minimum area enclosing a 2D point set and returns its area.
7840     *
7841     * The function finds a triangle of minimum area enclosing the given set of 2D points and returns its
7842     * area. The output for a given 2D point set is shown in the image below. 2D points are depicted in
     * *red* and the enclosing triangle in *yellow*.
7844     *
7845     * ![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png)
7846     *
7847     * The implementation of the algorithm is based on O'Rourke's CITE: ORourke86 and Klee and Laskowski's
7848     * CITE: KleeLaskowski85 papers. O'Rourke provides a \(\theta(n)\) algorithm for finding the minimal
     * enclosing triangle of a 2D convex polygon with n vertices. Since the #minEnclosingTriangle function
     * takes a 2D point set as input, an additional preprocessing step of computing the convex hull of the
7851     * 2D point set is required. The complexity of the #convexHull function is \(O(n log(n))\) which is higher
7852     * than \(\theta(n)\). Thus the overall complexity of the function is \(O(n log(n))\).
7853     *
7854     * @param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector&lt;&gt; or Mat
7855     * @param triangle Output vector of three 2D points defining the vertices of the triangle. The depth
7856     * of the OutputArray must be CV_32F.
7857     * @return automatically generated
7858     */
7859    public static double minEnclosingTriangle(Mat points, Mat triangle) {
7860        return minEnclosingTriangle_0(points.nativeObj, triangle.nativeObj);
7861    }
7862
7863
7864    //
7865    // C++:  double cv::matchShapes(Mat contour1, Mat contour2, int method, double parameter)
7866    //
7867
7868    /**
7869     * Compares two shapes.
7870     *
7871     * The function compares two shapes. All three implemented methods use the Hu invariants (see #HuMoments)
7872     *
7873     * @param contour1 First contour or grayscale image.
7874     * @param contour2 Second contour or grayscale image.
7875     * @param method Comparison method, see #ShapeMatchModes
7876     * @param parameter Method-specific parameter (not supported now).
7877     * @return automatically generated
7878     */
7879    public static double matchShapes(Mat contour1, Mat contour2, int method, double parameter) {
7880        return matchShapes_0(contour1.nativeObj, contour2.nativeObj, method, parameter);
7881    }
7882
7883
7884    //
7885    // C++:  void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false,  _hidden_  returnPoints = true)
7886    //
7887
7888    /**
7889     * Finds the convex hull of a point set.
7890     *
     * The function cv::convexHull finds the convex hull of a 2D point set using Sklansky's algorithm CITE: Sklansky82
     * that has *O(N logN)* complexity in the current implementation.
7893     *
7894     * @param points Input 2D point set, stored in std::vector or Mat.
7895     * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
7896     * the first case, the hull elements are 0-based indices of the convex hull points in the original
7897     * array (since the set of convex hull points is a subset of the original point set). In the second
7898     * case, hull elements are the convex hull points themselves.
7899     * @param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise.
7900     * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
7901     * to the right, and its Y axis pointing upwards.
     * The hidden returnPoints flag defaults to true: when the output array is a matrix, the function
     * returns convex hull points. When the output array is std::vector, the flag is ignored, and the
     * output depends on the type of the vector: std::vector&lt;int&gt; implies returnPoints=false,
     * std::vector&lt;Point&gt; implies returnPoints=true.
7906     *
7907     * <b>Note:</b> {@code points} and {@code hull} should be different arrays, inplace processing isn't supported.
7908     *
7909     * Check REF: tutorial_hull "the corresponding tutorial" for more details.
7910     *
7911     * useful links:
7912     *
7913     * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
7914     */
7915    public static void convexHull(MatOfPoint points, MatOfInt hull, boolean clockwise) {
7916        Mat points_mat = points;
7917        Mat hull_mat = hull;
7918        convexHull_0(points_mat.nativeObj, hull_mat.nativeObj, clockwise);
7919    }
7920
7921    /**
7922     * Finds the convex hull of a point set.
7923     *
     * The function cv::convexHull finds the convex hull of a 2D point set using Sklansky's algorithm CITE: Sklansky82
     * that has *O(N logN)* complexity in the current implementation.
7926     *
7927     * @param points Input 2D point set, stored in std::vector or Mat.
7928     * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In
7929     * the first case, the hull elements are 0-based indices of the convex hull points in the original
7930     * array (since the set of convex hull points is a subset of the original point set). In the second
7931     * case, hull elements are the convex hull points themselves.
     * In this variant, the hull is oriented counter-clockwise (the default; the assumed coordinate
     * system has its X axis pointing to the right, and its Y axis pointing upwards). The hidden
     * returnPoints flag is ignored when the output array is std::vector, and the output depends on the
     * type of the vector: std::vector&lt;int&gt; implies returnPoints=false,
     * std::vector&lt;Point&gt; implies returnPoints=true.
7938     *
7939     * <b>Note:</b> {@code points} and {@code hull} should be different arrays, inplace processing isn't supported.
7940     *
7941     * Check REF: tutorial_hull "the corresponding tutorial" for more details.
7942     *
7943     * useful links:
7944     *
7945     * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
7946     */
7947    public static void convexHull(MatOfPoint points, MatOfInt hull) {
7948        Mat points_mat = points;
7949        Mat hull_mat = hull;
7950        convexHull_2(points_mat.nativeObj, hull_mat.nativeObj);
7951    }
7952
7953
7954    //
7955    // C++:  void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
7956    //
7957
7958    /**
7959     * Finds the convexity defects of a contour.
7960     *
7961     * The figure below displays convexity defects of a hand contour:
7962     *
7963     * ![image](pics/defects.png)
7964     *
7965     * @param contour Input contour.
7966     * @param convexhull Convex hull obtained using convexHull that should contain indices of the contour
7967     * points that make the hull.
7968     * @param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java
7969     * interface each convexity defect is represented as 4-element integer vector (a.k.a. #Vec4i):
7970     * (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices
7971     * in the original contour of the convexity defect beginning, end and the farthest point, and
7972     * fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the
     * farthest contour point and the hull. That is, the floating-point value of the depth is
     * fixpt_depth/256.0.
7975     */
7976    public static void convexityDefects(MatOfPoint contour, MatOfInt convexhull, MatOfInt4 convexityDefects) {
7977        Mat contour_mat = contour;
7978        Mat convexhull_mat = convexhull;
7979        Mat convexityDefects_mat = convexityDefects;
7980        convexityDefects_0(contour_mat.nativeObj, convexhull_mat.nativeObj, convexityDefects_mat.nativeObj);
7981    }
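
    // Usage sketch (illustrative only, not part of the generated bindings): the Java binding
    // returns the hull as indices (MatOfInt), which is exactly the form convexityDefects expects.
    //
    //     MatOfPoint contour = new MatOfPoint(
    //             new Point(0, 0), new Point(10, 0), new Point(10, 10),
    //             new Point(5, 4), new Point(0, 10));
    //     MatOfInt hull = new MatOfInt();
    //     Imgproc.convexHull(contour, hull);
    //     MatOfInt4 defects = new MatOfInt4();
    //     Imgproc.convexityDefects(contour, hull, defects);  // depth = fixpt_depth / 256.0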
7982
7983
7984    //
7985    // C++:  bool cv::isContourConvex(vector_Point contour)
7986    //
7987
7988    /**
7989     * Tests a contour convexity.
7990     *
7991     * The function tests whether the input contour is convex or not. The contour must be simple, that is,
7992     * without self-intersections. Otherwise, the function output is undefined.
7993     *
7994     * @param contour Input vector of 2D points, stored in std::vector&lt;&gt; or Mat
7995     * @return automatically generated
7996     */
7997    public static boolean isContourConvex(MatOfPoint contour) {
7998        Mat contour_mat = contour;
7999        return isContourConvex_0(contour_mat.nativeObj);
8000    }
8001
8002
8003    //
8004    // C++:  float cv::intersectConvexConvex(Mat p1, Mat p2, Mat& p12, bool handleNested = true)
8005    //
8006
8007    /**
8008     * Finds intersection of two convex polygons
8009     *
8010     * @param p1 First polygon
8011     * @param p2 Second polygon
8012     * @param p12 Output polygon describing the intersecting area
8013     * @param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other.
8014     * When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge
8015     * of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested.
8016     *
8017     * @return Absolute value of area of intersecting polygon
8018     *
8019     * <b>Note:</b> intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
8020     */
8021    public static float intersectConvexConvex(Mat p1, Mat p2, Mat p12, boolean handleNested) {
8022        return intersectConvexConvex_0(p1.nativeObj, p2.nativeObj, p12.nativeObj, handleNested);
8023    }
8024
8025    /**
8026     * Finds intersection of two convex polygons
8027     *
8028     * @param p1 First polygon
8029     * @param p2 Second polygon
8030     * @param p12 Output polygon describing the intersecting area
     * In this variant, handleNested is true: an intersection is found if one of the polygons is fully
     * enclosed in the other.
8033     *
8034     * @return Absolute value of area of intersecting polygon
8035     *
8036     * <b>Note:</b> intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't.
8037     */
8038    public static float intersectConvexConvex(Mat p1, Mat p2, Mat p12) {
8039        return intersectConvexConvex_1(p1.nativeObj, p2.nativeObj, p12.nativeObj);
8040    }
8041
8042
8043    //
8044    // C++:  RotatedRect cv::fitEllipse(vector_Point2f points)
8045    //
8046
8047    /**
8048     * Fits an ellipse around a set of 2D points.
8049     *
     * The function calculates the ellipse that best fits (in a least-squares sense) a given set of 2D
     * points. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm
     * described by CITE: Fitzgibbon95 is used. Developers should keep in mind that the returned
     * ellipse/rotatedRect data may contain negative indices when the data points are close to the
     * border of the containing Mat element.
8055     *
8056     * @param points Input 2D point set, stored in std::vector&lt;&gt; or Mat
8057     * @return automatically generated
8058     */
8059    public static RotatedRect fitEllipse(MatOfPoint2f points) {
8060        Mat points_mat = points;
8061        return new RotatedRect(fitEllipse_0(points_mat.nativeObj));
8062    }
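
    // Usage sketch (illustrative only, not part of the generated bindings): fitEllipse requires at
    // least 5 points; the returned RotatedRect can be drawn with Imgproc.ellipse.
    //
    //     MatOfPoint2f pts = new MatOfPoint2f(
    //             new Point(0, 3), new Point(4, 0), new Point(0, -3),
    //             new Point(-4, 0), new Point(3, 2));
    //     RotatedRect ellipse = Imgproc.fitEllipse(pts);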
8063
8064
8065    //
8066    // C++:  RotatedRect cv::fitEllipseAMS(Mat points)
8067    //
8068
8069    /**
8070     * Fits an ellipse around a set of 2D points.
8071     *
8072     *  The function calculates the ellipse that fits a set of 2D points.
8073     *  It returns the rotated rectangle in which the ellipse is inscribed.
8074     *  The Approximate Mean Square (AMS) proposed by CITE: Taubin1991 is used.
8075     *
8076     *  For an ellipse, this basis set is \( \chi= \left(x^2, x y, y^2, x, y, 1\right) \),
8077     *  which is a set of six free coefficients \( A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \).
8078     *  However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \( (a,b) \),
8079     *  the position \( (x_0,y_0) \), and the orientation \( \theta \). This is because the basis set includes lines,
8080     *  quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.
8081     *  If the fit is found to be a parabolic or hyperbolic function then the standard #fitEllipse method is used.
8082     *  The AMS method restricts the fit to parabolic, hyperbolic and elliptical curves
8083     *  by imposing the condition that \( A^T ( D_x^T D_x  +   D_y^T D_y) A = 1 \) where
8084     *  the matrices \( Dx \) and \( Dy \) are the partial derivatives of the design matrix \( D \) with
8085     *  respect to x and y. The matrices are formed row by row applying the following to
8086     *  each of the points in the set:
     *  \(\begin{aligned}
     *  D(i,:) &amp;= \left\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\right\} \\
     *  D_x(i,:) &amp;= \left\{2 x_i, y_i, 0, 1, 0, 0\right\} \\
     *  D_y(i,:) &amp;= \left\{0, x_i, 2 y_i, 0, 1, 0\right\}
     *  \end{aligned}\)
8092     *  The AMS method minimizes the cost function
     *  \(
     *  \epsilon^2 = \frac{ A^T D^T D A }{ A^T (D_x^T D_x + D_y^T D_y) A }
     *  \)
8096     *
8097     *  The minimum cost is found by solving the generalized eigenvalue problem.
8098     *
8099     *  \(equation*}{
8100     *  D^T D A = \lambda  \left( D_x^T D_x +  D_y^T D_y\right) A
8101     *  \)
8102     *
8103     *  @param points Input 2D point set, stored in std::vector&lt;&gt; or Mat
8104     * @return automatically generated
8105     */
8106    public static RotatedRect fitEllipseAMS(Mat points) {
8107        return new RotatedRect(fitEllipseAMS_0(points.nativeObj));
8108    }
8109
8110
8111    //
8112    // C++:  RotatedRect cv::fitEllipseDirect(Mat points)
8113    //
8114
8115    /**
8116     * Fits an ellipse around a set of 2D points.
8117     *
8118     *  The function calculates the ellipse that fits a set of 2D points.
8119     *  It returns the rotated rectangle in which the ellipse is inscribed.
8120     *  The Direct least square (Direct) method by CITE: Fitzgibbon1999 is used.
8121     *
8122     *  For an ellipse, this basis set is \( \chi= \left(x^2, x y, y^2, x, y, 1\right) \),
8123     *  which is a set of six free coefficients \( A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \).
8124     *  However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \( (a,b) \),
8125     *  the position \( (x_0,y_0) \), and the orientation \( \theta \). This is because the basis set includes lines,
8126     *  quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.
8127     *  The Direct method confines the fit to ellipses by ensuring that \( 4 A_{xx} A_{yy}- A_{xy}^2 &gt; 0 \).
8128     *  The condition imposed is that \( 4 A_{xx} A_{yy}- A_{xy}^2=1 \) which satisfies the inequality
8129     *  and as the coefficients can be arbitrarily scaled is not overly restrictive.
8130     *
8131     *  \(equation*}{
8132     *  \epsilon ^2= A^T D^T D A \quad \text{with} \quad A^T C A =1 \quad \text{and} \quad C=\left(\begin{matrix}
8133     *  0 &amp; 0  &amp; 2  &amp; 0  &amp; 0  &amp;  0  \\
8134     *  0 &amp; -1  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8135     *  2 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8136     *  0 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8137     *  0 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0 \\
8138     *  0 &amp; 0  &amp; 0  &amp; 0  &amp; 0  &amp;  0
8139     *  \end{matrix} \right)
8140     *  \)
8141     *
8142     *  The minimum cost is found by solving the generalized eigenvalue problem.
8143     *
8144     *  \(equation*}{
8145     *  D^T D A = \lambda  \left( C\right) A
8146     *  \)
8147     *
8148     *  The system produces only one positive eigenvalue \( \lambda\) which is chosen as the solution
8149     *  with its eigenvector \(\mathbf{u}\). These are used to find the coefficients
8150     *
8151     *  \(equation*}{
8152     *  A = \sqrt{\frac{1}{\mathbf{u}^T C \mathbf{u}}}  \mathbf{u}
8153     *  \)
8154     *  The scaling factor guarantees that  \(A^T C A =1\).
8155     *
8156     *  @param points Input 2D point set, stored in std::vector&lt;&gt; or Mat
8157     * @return automatically generated
8158     */
8159    public static RotatedRect fitEllipseDirect(Mat points) {
8160        return new RotatedRect(fitEllipseDirect_0(points.nativeObj));
8161    }
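
    // Usage sketch (illustrative, not part of the generated bindings): all three fitting
    // variants accept the same point data, since MatOfPoint2f is a Mat subclass:
    //
    //     MatOfPoint2f pts = ...; // at least 5 points, as in the #fitEllipse sketch above
    //     RotatedRect lsq    = Imgproc.fitEllipse(pts);
    //     RotatedRect ams    = Imgproc.fitEllipseAMS(pts);
    //     RotatedRect direct = Imgproc.fitEllipseDirect(pts);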


    //
    // C++:  void cv::fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps)
    //

    /**
     * Fits a line to a 2D or 3D point set.
     *
     * The function fitLine fits a line to a 2D or 3D point set by minimizing \(\sum_i \rho(r_i)\) where
     * \(r_i\) is the distance between the \(i^{th}\) point and the line, and \(\rho(r)\) is a distance
     * function, one of the following:
     * <ul>
     *   <li>
     *   DIST_L2
     * \(\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\)
     *   </li>
     *   <li>
     *  DIST_L1
     * \(\rho (r) = r\)
     *   </li>
     *   <li>
     *  DIST_L12
     * \(\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\)
     *   </li>
     *   <li>
     *  DIST_FAIR
     * \(\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\)
     *   </li>
     *   <li>
     *  DIST_WELSCH
     * \(\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\)
     *   </li>
     *   <li>
     *  DIST_HUBER
     * \(\rho (r) = \begin{cases} r^2/2 &amp; \text{if } r &lt; C \\ C \cdot (r-C/2) &amp; \text{otherwise} \end{cases} \quad \text{where} \quad C=1.345\)
     *   </li>
     * </ul>
     *
     * The algorithm is based on the M-estimator ( &lt;http://en.wikipedia.org/wiki/M-estimator&gt; ) technique
     * that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
     * weights \(w_i\) are adjusted to be inversely proportional to \(\rho(r_i)\) .
     *
     * @param points Input vector of 2D or 3D points, stored in std::vector&lt;&gt; or Mat.
     * @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements
     * (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and
     * (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like
     * Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line
     * and (x0, y0, z0) is a point on the line.
     * @param distType Distance used by the M-estimator, see #DistanceTypes
     * @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
     * is chosen.
     * @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line).
     * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
     */
    public static void fitLine(Mat points, Mat line, int distType, double param, double reps, double aeps) {
        fitLine_0(points.nativeObj, line.nativeObj, distType, param, reps, aeps);
    }
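
    // Usage sketch (illustrative, not part of the generated bindings): fit a 2D line and
    // read back the (vx, vy, x0, y0) parameters from the 4x1 output Mat:
    //
    //     MatOfPoint2f pts = new MatOfPoint2f(
    //             new Point(0, 1), new Point(1, 3), new Point(2, 5), new Point(3, 7));
    //     Mat lineParams = new Mat();
    //     Imgproc.fitLine(pts, lineParams, Imgproc.DIST_L2, 0, 0.01, 0.01);
    //     double vx = lineParams.get(0, 0)[0], vy = lineParams.get(1, 0)[0];
    //     double x0 = lineParams.get(2, 0)[0], y0 = lineParams.get(3, 0)[0];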


    //
    // C++:  double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist)
    //

    /**
     * Performs a point-in-contour test.
     *
     * The function determines whether the point is inside a contour, outside, or lies on an edge (or
     * coincides with a vertex). It returns a positive value (inside), a negative value (outside), or
     * zero (on an edge), correspondingly. When measureDist=false , the return value is +1, -1, and 0, respectively.
     * Otherwise, the return value is a signed distance between the point and the nearest contour edge.
     *
     * See below a sample output of the function where each image pixel is tested against the contour:
     *
     * ![sample output](pics/pointpolygon.png)
     *
     * @param contour Input contour.
     * @param pt Point tested against the contour.
     * @param measureDist If true, the function estimates the signed distance from the point to the
     * nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
     * @return automatically generated
     */
    public static double pointPolygonTest(MatOfPoint2f contour, Point pt, boolean measureDist) {
        Mat contour_mat = contour;
        return pointPolygonTest_0(contour_mat.nativeObj, pt.x, pt.y, measureDist);
    }
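
    // Usage sketch (illustrative, not part of the generated bindings): test points against
    // a 100x100 square contour; with measureDist=true the result is the signed distance:
    //
    //     MatOfPoint2f square = new MatOfPoint2f(
    //             new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
    //     double d = Imgproc.pointPolygonTest(square, new Point(50, 50), true);   // +50.0 (inside)
    //     double s = Imgproc.pointPolygonTest(square, new Point(150, 50), false); // -1.0 (outside)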


    //
    // C++:  int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion)
    //

    /**
     * Finds out if there is any intersection between two rotated rectangles.
     *
     * If there is, then the vertices of the intersecting region are returned as well.
     *
     * Below are some examples of intersection configurations. The hatched pattern indicates the
     * intersecting region and the red vertices are returned by the function.
     *
     * ![intersection examples](pics/intersection.png)
     *
     * @param rect1 First rectangle
     * @param rect2 Second rectangle
     * @param intersectingRegion The output array of the vertices of the intersecting region. It returns
     * at most 8 vertices. Stored as std::vector&lt;cv::Point2f&gt; or cv::Mat as Mx1 of type CV_32FC2.
     * @return One of #RectanglesIntersectTypes
     */
    public static int rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat intersectingRegion) {
        return rotatedRectangleIntersection_0(rect1.center.x, rect1.center.y, rect1.size.width, rect1.size.height, rect1.angle, rect2.center.x, rect2.center.y, rect2.size.width, rect2.size.height, rect2.angle, intersectingRegion.nativeObj);
    }
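
    // Usage sketch (illustrative, not part of the generated bindings): intersect two rotated
    // rectangles and compare the result against the #RectanglesIntersectTypes constants:
    //
    //     RotatedRect r1 = new RotatedRect(new Point(50, 50), new Size(40, 40), 0);
    //     RotatedRect r2 = new RotatedRect(new Point(60, 60), new Size(40, 40), 30);
    //     Mat region = new Mat();
    //     int kind = Imgproc.rotatedRectangleIntersection(r1, r2, region);
    //     // kind is INTERSECT_NONE, INTERSECT_PARTIAL or INTERSECT_FULL; region holds the vertices.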


    //
    // C++:  Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard()
    //

    /**
     * Creates a smart pointer to a cv::GeneralizedHoughBallard class and initializes it.
     * @return automatically generated
     */
    public static GeneralizedHoughBallard createGeneralizedHoughBallard() {
        return GeneralizedHoughBallard.__fromPtr__(createGeneralizedHoughBallard_0());
    }


    //
    // C++:  Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil()
    //

    /**
     * Creates a smart pointer to a cv::GeneralizedHoughGuil class and initializes it.
     * @return automatically generated
     */
    public static GeneralizedHoughGuil createGeneralizedHoughGuil() {
        return GeneralizedHoughGuil.__fromPtr__(createGeneralizedHoughGuil_0());
    }


    //
    // C++:  void cv::applyColorMap(Mat src, Mat& dst, int colormap)
    //

    /**
     * Applies a GNU Octave/MATLAB equivalent colormap on a given image.
     *
     * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.
     * @param dst The result is the colormapped source image. Note: Mat::create is called on dst.
     * @param colormap The colormap to apply, see #ColormapTypes
     */
    public static void applyColorMap(Mat src, Mat dst, int colormap) {
        applyColorMap_0(src.nativeObj, dst.nativeObj, colormap);
    }
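
    // Usage sketch (illustrative, not part of the generated bindings; the file name is
    // hypothetical): pseudo-color a grayscale image with one of the predefined maps:
    //
    //     Mat gray = Imgcodecs.imread("input.png", Imgcodecs.IMREAD_GRAYSCALE);
    //     Mat colored = new Mat();
    //     Imgproc.applyColorMap(gray, colored, Imgproc.COLORMAP_JET);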


    //
    // C++:  void cv::applyColorMap(Mat src, Mat& dst, Mat userColor)
    //

    /**
     * Applies a user colormap on a given image.
     *
     * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.
     * @param dst The result is the colormapped source image. Note: Mat::create is called on dst.
     * @param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256
     */
    public static void applyColorMap(Mat src, Mat dst, Mat userColor) {
        applyColorMap_1(src.nativeObj, dst.nativeObj, userColor.nativeObj);
    }
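
    // Usage sketch (illustrative, not part of the generated bindings): build a 256-entry
    // BGR lookup table and apply it as a custom colormap:
    //
    //     Mat lut = new Mat(256, 1, CvType.CV_8UC3);
    //     for (int i = 0; i < 256; i++) {
    //         lut.put(i, 0, i, 0, 255 - i); // blue ramps up, red ramps down
    //     }
    //     Mat colored = new Mat();
    //     Imgproc.applyColorMap(gray, colored, lut); // gray as in the previous sketch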


    //
    // C++:  void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    //

    /**
     * Draws a line segment connecting two points.
     *
     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
     * lines are drawn using Gaussian filtering.
     *
     * @param img Image.
     * @param pt1 First point of the line segment.
     * @param pt2 Second point of the line segment.
     * @param color Line color.
     * @param thickness Line thickness.
     * @param lineType Type of the line. See #LineTypes.
     * @param shift Number of fractional bits in the point coordinates.
     */
    public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) {
        line_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    }
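
    // Usage sketch (illustrative, not part of the generated bindings): draw an antialiased
    // green diagonal on a black canvas:
    //
    //     Mat canvas = Mat.zeros(400, 400, CvType.CV_8UC3);
    //     Imgproc.line(canvas, new Point(10, 10), new Point(390, 390),
    //             new Scalar(0, 255, 0), 2, Imgproc.LINE_AA, 0);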

    /**
     * Draws a line segment connecting two points.
     *
     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
     * lines are drawn using Gaussian filtering.
     *
     * @param img Image.
     * @param pt1 First point of the line segment.
     * @param pt2 Second point of the line segment.
     * @param color Line color.
     * @param thickness Line thickness.
     * @param lineType Type of the line. See #LineTypes.
     */
    public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType) {
        line_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    }

    /**
     * Draws a line segment connecting two points.
     *
     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
     * lines are drawn using Gaussian filtering.
     *
     * @param img Image.
     * @param pt1 First point of the line segment.
     * @param pt2 Second point of the line segment.
     * @param color Line color.
     * @param thickness Line thickness.
     */
    public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
        line_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     * Draws a line segment connecting two points.
     *
     * The function line draws the line segment between pt1 and pt2 points in the image. The line is
     * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected
     * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased
     * lines are drawn using Gaussian filtering.
     *
     * @param img Image.
     * @param pt1 First point of the line segment.
     * @param pt2 Second point of the line segment.
     * @param color Line color.
     */
    public static void line(Mat img, Point pt1, Point pt2, Scalar color) {
        line_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int line_type = 8, int shift = 0, double tipLength = 0.1)
    //

    /**
     * Draws an arrow segment pointing from the first point to the second one.
     *
     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
     *
     * @param img Image.
     * @param pt1 The point the arrow starts from.
     * @param pt2 The point the arrow points to.
     * @param color Line color.
     * @param thickness Line thickness.
     * @param line_type Type of the line. See #LineTypes
     * @param shift Number of fractional bits in the point coordinates.
     * @param tipLength The length of the arrow tip in relation to the arrow length
     */
    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type, int shift, double tipLength) {
        arrowedLine_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type, shift, tipLength);
    }
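
    // Usage sketch (illustrative, not part of the generated bindings; canvas as in the
    // #line sketch above): tipLength is relative, so 0.1 makes the tip 10% of the arrow length:
    //
    //     Imgproc.arrowedLine(canvas, new Point(50, 200), new Point(350, 200),
    //             new Scalar(0, 0, 255), 2, Imgproc.LINE_8, 0, 0.1);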

    /**
     * Draws an arrow segment pointing from the first point to the second one.
     *
     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
     *
     * @param img Image.
     * @param pt1 The point the arrow starts from.
     * @param pt2 The point the arrow points to.
     * @param color Line color.
     * @param thickness Line thickness.
     * @param line_type Type of the line. See #LineTypes
     * @param shift Number of fractional bits in the point coordinates.
     */
    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type, int shift) {
        arrowedLine_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type, shift);
    }

    /**
     * Draws an arrow segment pointing from the first point to the second one.
     *
     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
     *
     * @param img Image.
     * @param pt1 The point the arrow starts from.
     * @param pt2 The point the arrow points to.
     * @param color Line color.
     * @param thickness Line thickness.
     * @param line_type Type of the line. See #LineTypes
     */
    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type) {
        arrowedLine_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type);
    }

    /**
     * Draws an arrow segment pointing from the first point to the second one.
     *
     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
     *
     * @param img Image.
     * @param pt1 The point the arrow starts from.
     * @param pt2 The point the arrow points to.
     * @param color Line color.
     * @param thickness Line thickness.
     */
    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
        arrowedLine_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     * Draws an arrow segment pointing from the first point to the second one.
     *
     * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.
     *
     * @param img Image.
     * @param pt1 The point the arrow starts from.
     * @param pt2 The point the arrow points to.
     * @param color Line color.
     */
    public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color) {
        arrowedLine_4(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    //

    /**
     * Draws a simple, thick, or filled up-right rectangle.
     *
     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
     * are pt1 and pt2.
     *
     * @param img Image.
     * @param pt1 Vertex of the rectangle.
     * @param pt2 Vertex of the rectangle opposite to pt1 .
     * @param color Rectangle color or brightness (grayscale image).
     * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
     * mean that the function has to draw a filled rectangle.
     * @param lineType Type of the line. See #LineTypes
     * @param shift Number of fractional bits in the point coordinates.
     */
    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) {
        rectangle_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    }
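
    // Usage sketch (illustrative, not part of the generated bindings; canvas as in the
    // #line sketch above): a negative thickness such as Imgproc.FILLED fills the rectangle:
    //
    //     Imgproc.rectangle(canvas, new Point(20, 20), new Point(120, 90),
    //             new Scalar(255, 0, 0), Imgproc.FILLED, Imgproc.LINE_8, 0);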

    /**
     * Draws a simple, thick, or filled up-right rectangle.
     *
     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
     * are pt1 and pt2.
     *
     * @param img Image.
     * @param pt1 Vertex of the rectangle.
     * @param pt2 Vertex of the rectangle opposite to pt1 .
     * @param color Rectangle color or brightness (grayscale image).
     * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
     * mean that the function has to draw a filled rectangle.
     * @param lineType Type of the line. See #LineTypes
     */
    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType) {
        rectangle_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    }

    /**
     * Draws a simple, thick, or filled up-right rectangle.
     *
     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
     * are pt1 and pt2.
     *
     * @param img Image.
     * @param pt1 Vertex of the rectangle.
     * @param pt2 Vertex of the rectangle opposite to pt1 .
     * @param color Rectangle color or brightness (grayscale image).
     * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,
     * mean that the function has to draw a filled rectangle.
     */
    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness) {
        rectangle_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     * Draws a simple, thick, or filled up-right rectangle.
     *
     * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners
     * are pt1 and pt2.
     *
     * @param img Image.
     * @param pt1 Vertex of the rectangle.
     * @param pt2 Vertex of the rectangle opposite to pt1 .
     * @param color Rectangle color or brightness (grayscale image).
     */
    public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color) {
        rectangle_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    //

    /**
     *
     *
     * Uses the {@code rec} parameter as an alternative specification of the drawn rectangle:
     * {@code rec.tl()} and {@code rec.br()-Point(1,1)} are opposite corners.
     * @param img automatically generated
     * @param rec automatically generated
     * @param color automatically generated
     * @param thickness automatically generated
     * @param lineType automatically generated
     * @param shift automatically generated
     */
    public static void rectangle(Mat img, Rect rec, Scalar color, int thickness, int lineType, int shift) {
        rectangle_4(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    }

    /**
     *
     *
     * Uses the {@code rec} parameter as an alternative specification of the drawn rectangle:
     * {@code rec.tl()} and {@code rec.br()-Point(1,1)} are opposite corners.
     * @param img automatically generated
     * @param rec automatically generated
     * @param color automatically generated
     * @param thickness automatically generated
     * @param lineType automatically generated
     */
    public static void rectangle(Mat img, Rect rec, Scalar color, int thickness, int lineType) {
        rectangle_5(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    }

    /**
     *
     *
     * Uses the {@code rec} parameter as an alternative specification of the drawn rectangle:
     * {@code rec.tl()} and {@code rec.br()-Point(1,1)} are opposite corners.
     * @param img automatically generated
     * @param rec automatically generated
     * @param color automatically generated
     * @param thickness automatically generated
     */
    public static void rectangle(Mat img, Rect rec, Scalar color, int thickness) {
        rectangle_6(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     *
     *
     * Uses the {@code rec} parameter as an alternative specification of the drawn rectangle:
     * {@code rec.tl()} and {@code rec.br()-Point(1,1)} are opposite corners.
     * @param img automatically generated
     * @param rec automatically generated
     * @param color automatically generated
     */
    public static void rectangle(Mat img, Rect rec, Scalar color) {
        rectangle_7(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    //

    /**
     * Draws a circle.
     *
     * The function cv::circle draws a simple or filled circle with a given center and radius.
     * @param img Image where the circle is drawn.
     * @param center Center of the circle.
     * @param radius Radius of the circle.
     * @param color Circle color.
     * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
     * mean that a filled circle is to be drawn.
     * @param lineType Type of the circle boundary. See #LineTypes
     * @param shift Number of fractional bits in the coordinates of the center and in the radius value.
     */
    public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) {
        circle_0(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    }
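
    // Usage sketch (illustrative, not part of the generated bindings; canvas as above):
    // a 3-pixel outline versus a filled disc:
    //
    //     Imgproc.circle(canvas, new Point(200, 200), 60, new Scalar(255, 255, 0), 3, Imgproc.LINE_AA, 0);
    //     Imgproc.circle(canvas, new Point(200, 200), 10, new Scalar(255, 255, 0), Imgproc.FILLED, Imgproc.LINE_AA, 0);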

    /**
     * Draws a circle.
     *
     * The function cv::circle draws a simple or filled circle with a given center and radius.
     * @param img Image where the circle is drawn.
     * @param center Center of the circle.
     * @param radius Radius of the circle.
     * @param color Circle color.
     * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
     * mean that a filled circle is to be drawn.
     * @param lineType Type of the circle boundary. See #LineTypes
     */
    public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType) {
        circle_1(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    }

    /**
     * Draws a circle.
     *
     * The function cv::circle draws a simple or filled circle with a given center and radius.
     * @param img Image where the circle is drawn.
     * @param center Center of the circle.
     * @param radius Radius of the circle.
     * @param color Circle color.
     * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,
     * mean that a filled circle is to be drawn.
     */
    public static void circle(Mat img, Point center, int radius, Scalar color, int thickness) {
        circle_2(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     * Draws a circle.
     *
     * The function cv::circle draws a simple or filled circle with a given center and radius.
     * @param img Image where the circle is drawn.
     * @param center Center of the circle.
     * @param radius Radius of the circle.
     * @param color Circle color.
     */
    public static void circle(Mat img, Point center, int radius, Scalar color) {
        circle_3(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    //

    /**
     * Draws a simple or thick elliptic arc or fills an ellipse sector.
     *
     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
     * A piecewise-linear curve is used to approximate the elliptic arc
     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
     * the meaning of the parameters to draw the blue arc.
     *
     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
     *
     * @param img Image.
     * @param center Center of the ellipse.
     * @param axes Half of the size of the ellipse main axes.
     * @param angle Ellipse rotation angle in degrees.
     * @param startAngle Starting angle of the elliptic arc in degrees.
     * @param endAngle Ending angle of the elliptic arc in degrees.
     * @param color Ellipse color.
     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
     * a filled ellipse sector is to be drawn.
     * @param lineType Type of the ellipse boundary. See #LineTypes
     * @param shift Number of fractional bits in the coordinates of the center and values of axes.
     */
    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) {
        ellipse_0(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    }
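
    // Usage sketch (illustrative, not part of the generated bindings; canvas as above):
    // a 270-degree elliptic arc, rotated 30 degrees, with half-axes 100 and 60:
    //
    //     Imgproc.ellipse(canvas, new Point(200, 200), new Size(100, 60),
    //             30, 0, 270, new Scalar(0, 255, 255), 2, Imgproc.LINE_AA, 0);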

    /**
     * Draws a simple or thick elliptic arc or fills an ellipse sector.
     *
     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
     * A piecewise-linear curve is used to approximate the elliptic arc
     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
     * the meaning of the parameters to draw the blue arc.
     *
     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
     *
     * @param img Image.
     * @param center Center of the ellipse.
     * @param axes Half of the size of the ellipse main axes.
     * @param angle Ellipse rotation angle in degrees.
     * @param startAngle Starting angle of the elliptic arc in degrees.
     * @param endAngle Ending angle of the elliptic arc in degrees.
     * @param color Ellipse color.
     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
     * a filled ellipse sector is to be drawn.
     * @param lineType Type of the ellipse boundary. See #LineTypes
     */
    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType) {
        ellipse_1(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    }

    /**
     * Draws a simple or thick elliptic arc or fills an ellipse sector.
     *
     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
     * A piecewise-linear curve is used to approximate the elliptic arc
     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
     * the meaning of the parameters to draw the blue arc.
     *
     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
     *
     * @param img Image.
     * @param center Center of the ellipse.
     * @param axes Half of the size of the ellipse main axes.
     * @param angle Ellipse rotation angle in degrees.
     * @param startAngle Starting angle of the elliptic arc in degrees.
     * @param endAngle Ending angle of the elliptic arc in degrees.
     * @param color Ellipse color.
     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
     * a filled ellipse sector is to be drawn.
     */
    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness) {
        ellipse_2(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     * Draws a simple or thick elliptic arc or fills an ellipse sector.
     *
     * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic
     * arc, or a filled ellipse sector. The drawing code uses general parametric form.
     * A piecewise-linear curve is used to approximate the elliptic arc
     * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using
     * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first
     * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and
     * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains
     * the meaning of the parameters to draw the blue arc.
     *
     * ![Parameters of Elliptic Arc](pics/ellipse.svg)
     *
     * @param img Image.
     * @param center Center of the ellipse.
     * @param axes Half of the size of the ellipse main axes.
     * @param angle Ellipse rotation angle in degrees.
     * @param startAngle Starting angle of the elliptic arc in degrees.
     * @param endAngle Ending angle of the elliptic arc in degrees.
     * @param color Ellipse color.
     */
    public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color) {
        ellipse_3(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = LINE_8)
    //

    /**
     *
     * @param img Image.
     * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
     * an ellipse inscribed in the rotated rectangle.
     * @param color Ellipse color.
     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
     * a filled ellipse sector is to be drawn.
     * @param lineType Type of the ellipse boundary. See #LineTypes
     */
    public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType) {
        ellipse_4(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
    }

    /**
     *
     * @param img Image.
     * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
     * an ellipse inscribed in the rotated rectangle.
     * @param color Ellipse color.
     * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that
     * a filled ellipse sector is to be drawn.
     */
    public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness) {
        ellipse_5(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
    }

    /**
     *
     * @param img Image.
     * @param box Alternative ellipse representation via RotatedRect. This means that the function draws
     * an ellipse inscribed in the rotated rectangle.
     * @param color Ellipse color.
     */
    public static void ellipse(Mat img, RotatedRect box, Scalar color) {
        ellipse_6(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::drawMarker(Mat& img, Point position, Scalar color, int markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, int line_type = 8)
    //

    /**
     * Draws a marker on a predefined position in an image.
     *
     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
     * marker types are supported, see #MarkerTypes for more information.
     *
     * @param img Image.
     * @param position The point where the crosshair is positioned.
     * @param color Line color.
     * @param markerType The specific type of marker you want to use, see #MarkerTypes
     * @param thickness Line thickness.
     * @param line_type Type of the line, See #LineTypes
     * @param markerSize The length of the marker axis [default = 20 pixels]
     */
    public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize, int thickness, int line_type) {
        drawMarker_0(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize, thickness, line_type);
    }
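
    // Usage sketch (illustrative, not part of the generated bindings; canvas as above):
    //
    //     Imgproc.drawMarker(canvas, new Point(100, 100), new Scalar(0, 0, 255),
    //             Imgproc.MARKER_CROSS, 20, 2, Imgproc.LINE_8);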

    /**
     * Draws a marker on a predefined position in an image.
     *
     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
     * marker types are supported, see #MarkerTypes for more information.
     *
     * @param img Image.
     * @param position The point where the crosshair is positioned.
     * @param color Line color.
     * @param markerType The specific type of marker you want to use, see #MarkerTypes
     * @param thickness Line thickness.
     * @param markerSize The length of the marker axis [default = 20 pixels]
     */
    public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize, int thickness) {
        drawMarker_1(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize, thickness);
    }

    /**
     * Draws a marker on a predefined position in an image.
     *
     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
     * marker types are supported, see #MarkerTypes for more information.
     *
     * @param img Image.
     * @param position The point where the crosshair is positioned.
     * @param color Line color.
     * @param markerType The specific type of marker you want to use, see #MarkerTypes
     * @param markerSize The length of the marker axis [default = 20 pixels]
     */
    public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize) {
        drawMarker_2(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize);
    }

    /**
     * Draws a marker on a predefined position in an image.
     *
     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
     * marker types are supported, see #MarkerTypes for more information.
     *
     * @param img Image.
     * @param position The point where the crosshair is positioned.
     * @param color Line color.
     * @param markerType The specific type of marker you want to use, see #MarkerTypes
     */
    public static void drawMarker(Mat img, Point position, Scalar color, int markerType) {
        drawMarker_3(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType);
    }

    /**
     * Draws a marker on a predefined position in an image.
     *
     * The function cv::drawMarker draws a marker on a given position in the image. For the moment several
     * marker types are supported, see #MarkerTypes for more information.
     *
     * @param img Image.
     * @param position The point where the crosshair is positioned.
     * @param color Line color.
     */
    public static void drawMarker(Mat img, Point position, Scalar color) {
        drawMarker_4(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = LINE_8, int shift = 0)
    //

    /**
     * Fills a convex polygon.
     *
     * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
     * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
     * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
     * at most twice (though its top-most and/or bottom edge could be horizontal).
     *
     * @param img Image.
     * @param points Polygon vertices.
     * @param color Polygon color.
     * @param lineType Type of the polygon boundaries. See #LineTypes
     * @param shift Number of fractional bits in the vertex coordinates.
     */
    public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift) {
        Mat points_mat = points;
        fillConvexPoly_0(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift);
    }
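
    // Usage sketch (illustrative, not part of the generated bindings; canvas as above):
    // fill a triangle given as integer vertices:
    //
    //     MatOfPoint triangle = new MatOfPoint(
    //             new Point(200, 50), new Point(300, 250), new Point(100, 250));
    //     Imgproc.fillConvexPoly(canvas, triangle, new Scalar(0, 128, 255), Imgproc.LINE_8, 0);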

    /**
     * Fills a convex polygon.
     *
     * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
     * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
     * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
     * at most twice (though its top-most and/or bottom edge could be horizontal).
     *
     * @param img Image.
     * @param points Polygon vertices.
     * @param color Polygon color.
     * @param lineType Type of the polygon boundaries. See #LineTypes
     */
    public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType) {
        Mat points_mat = points;
        fillConvexPoly_1(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType);
    }

    /**
     * Fills a convex polygon.
     *
     * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the
     * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without
     * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)
     * at most twice (though its top-most and/or bottom edge could be horizontal).
     *
     * @param img Image.
     * @param points Polygon vertices.
     * @param color Polygon color.
     */
    public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color) {
        Mat points_mat = points;
        fillConvexPoly_2(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = LINE_8, int shift = 0, Point offset = Point())
    //

    /**
     * Fills the area bounded by one or more polygons.
     *
     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
     * complex areas, for example, areas with holes, contours with self-intersections (some of their
     * parts), and so forth.
     *
     * @param img Image.
     * @param pts Array of polygons where each polygon is represented as an array of points.
     * @param color Polygon color.
     * @param lineType Type of the polygon boundaries. See #LineTypes
     * @param shift Number of fractional bits in the vertex coordinates.
     * @param offset Optional offset of all points of the contours.
     */
    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType, int shift, Point offset) {
        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
        fillPoly_0(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift, offset.x, offset.y);
    }
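
    // Usage sketch (illustrative, not part of the generated bindings; canvas as above):
    // two nested polygons produce a filled ring, since the nested area becomes a hole:
    //
    //     List<MatOfPoint> polys = new ArrayList<MatOfPoint>();
    //     polys.add(new MatOfPoint(new Point(50, 50), new Point(350, 50),
    //             new Point(350, 350), new Point(50, 350)));   // outer square
    //     polys.add(new MatOfPoint(new Point(150, 150), new Point(250, 150),
    //             new Point(250, 250), new Point(150, 250)));  // inner square becomes a hole
    //     Imgproc.fillPoly(canvas, polys, new Scalar(255, 255, 255), Imgproc.LINE_8, 0, new Point(0, 0));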

    /**
     * Fills the area bounded by one or more polygons.
     *
     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
     * complex areas, for example, areas with holes, contours with self-intersections (some of their
     * parts), and so forth.
     *
     * @param img Image.
     * @param pts Array of polygons where each polygon is represented as an array of points.
     * @param color Polygon color.
     * @param lineType Type of the polygon boundaries. See #LineTypes
     * @param shift Number of fractional bits in the vertex coordinates.
     */
    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType, int shift) {
        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
        fillPoly_1(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift);
    }

    /**
     * Fills the area bounded by one or more polygons.
     *
     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
     * complex areas, for example, areas with holes, contours with self-intersections (some of their
     * parts), and so forth.
     *
     * @param img Image.
     * @param pts Array of polygons where each polygon is represented as an array of points.
     * @param color Polygon color.
     * @param lineType Type of the polygon boundaries. See #LineTypes
     */
    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType) {
        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
        fillPoly_2(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType);
    }

    /**
     * Fills the area bounded by one or more polygons.
     *
     * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill
     * complex areas, for example, areas with holes, contours with self-intersections (some of their
     * parts), and so forth.
     *
     * @param img Image.
     * @param pts Array of polygons where each polygon is represented as an array of points.
     * @param color Polygon color.
     */
    public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color) {
        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
        fillPoly_3(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]);
    }


    //
    // C++:  void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    //

    /**
     * Draws several polygonal curves.
     *
     * @param img Image.
     * @param pts Array of polygonal curves.
     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
     * the function draws a line from the last vertex of each curve to its first vertex.
     * @param color Polyline color.
     * @param thickness Thickness of the polyline edges.
     * @param lineType Type of the line segments. See #LineTypes
     * @param shift Number of fractional bits in the vertex coordinates.
     *
     * The function cv::polylines draws one or more polygonal curves.
     */
    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift) {
        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
        polylines_0(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift);
    }
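
    // Usage sketch (illustrative, not part of the generated bindings; canvas as above):
    // draw an open polyline; isClosed=true would add a segment back to the first vertex:
    //
    //     List<MatOfPoint> curves = new ArrayList<MatOfPoint>();
    //     curves.add(new MatOfPoint(new Point(10, 300), new Point(100, 100),
    //             new Point(200, 300), new Point(300, 100)));
    //     Imgproc.polylines(canvas, curves, false, new Scalar(200, 200, 200), 1, Imgproc.LINE_AA, 0);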
9111
9112    /**
9113     * Draws several polygonal curves.
9114     *
9115     * @param img Image.
9116     * @param pts Array of polygonal curves.
9117     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
9118     * the function draws a line from the last vertex of each curve to its first vertex.
9119     * @param color Polyline color.
9120     * @param thickness Thickness of the polyline edges.
9121     * @param lineType Type of the line segments. See #LineTypes
9122     *
9123     * The function cv::polylines draws one or more polygonal curves.
9124     */
9125    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType) {
9126        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
9127        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
9128        polylines_1(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
9129    }
9130
9131    /**
9132     * Draws several polygonal curves.
9133     *
9134     * @param img Image.
9135     * @param pts Array of polygonal curves.
9136     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
9137     * the function draws a line from the last vertex of each curve to its first vertex.
9138     * @param color Polyline color.
9139     * @param thickness Thickness of the polyline edges.
9140     *
9141     * The function cv::polylines draws one or more polygonal curves.
9142     */
9143    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness) {
9144        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
9145        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
9146        polylines_2(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
9147    }
9148
9149    /**
9150     * Draws several polygonal curves.
9151     *
9152     * @param img Image.
9153     * @param pts Array of polygonal curves.
9154     * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,
9155     * the function draws a line from the last vertex of each curve to its first vertex.
9156     * @param color Polyline color.
9157     *
9158     * The function cv::polylines draws one or more polygonal curves.
9159     */
9160    public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color) {
9161        List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0);
9162        Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm);
9163        polylines_3(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3]);
9164    }
9165
9166
9167    //
9168    // C++:  void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
9169    //
9170
9171    /**
     * Draws contour outlines or filled contours.
     *
     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
     * connected components from the binary image and label them:
     * INCLUDE: snippets/imgproc_drawContours.cpp
     *
     * @param image Destination image.
     * @param contours All the input contours. Each contour is stored as a point vector.
     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
     * @param color Color of the contours.
     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
     * thickness=#FILLED), the contour interiors are drawn.
     * @param lineType Line connectivity. See #LineTypes
     * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
     * some of the contours (see maxLevel).
     * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
     * parameter is only taken into account when there is a hierarchy available.
     * @param offset Optional contour shift parameter. Shift all the drawn contours by the specified
     * \(\texttt{offset}=(dx,dy)\) .
     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
     * even when no hierarchy data is provided. This is done by analyzing all the outlines together
     * using the even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
     * contours. To solve this problem, call #drawContours separately for each sub-group
     * of contours, or iterate over the collection using the contourIdx parameter.
9199     */
9200    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset) {
9201        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9202        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9203        drawContours_0(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel, offset.x, offset.y);
9204    }
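
    // Usage sketch (illustrative, not part of the generated API): retrieves the
    // full contour hierarchy from a binary mask and outlines every contour,
    // including nested ones. The mask is assumed to be 8-bit single-channel;
    // the color and thickness are example values.
    private static void drawContoursExample(Mat binaryMask, Mat canvas) {
        List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
        Mat hierarchy = new Mat();
        findContours(binaryMask, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
        // contourIdx = -1 draws all contours; maxLevel = 2 also descends into nested
        // and nested-to-nested contours because a hierarchy is provided.
        drawContours(canvas, contours, -1, new Scalar(0, 0, 255), 1, LINE_8, hierarchy, 2, new Point(0, 0));
    }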
9205
9206    /**
     * Draws contour outlines or filled contours.
     *
     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
     * connected components from the binary image and label them:
     * INCLUDE: snippets/imgproc_drawContours.cpp
     *
     * @param image Destination image.
     * @param contours All the input contours. Each contour is stored as a point vector.
     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
     * @param color Color of the contours.
     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
     * thickness=#FILLED), the contour interiors are drawn.
     * @param lineType Line connectivity. See #LineTypes
     * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
     * some of the contours (see maxLevel).
     * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.
     * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function
     * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This
     * parameter is only taken into account when there is a hierarchy available.
     * The {@code offset} parameter defaults to \((0,0)\) (no shift).
     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
     * even when no hierarchy data is provided. This is done by analyzing all the outlines together
     * using the even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
     * contours. To solve this problem, call #drawContours separately for each sub-group
     * of contours, or iterate over the collection using the contourIdx parameter.
9233     */
9234    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel) {
9235        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9236        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9237        drawContours_1(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel);
9238    }
9239
9240    /**
     * Draws contour outlines or filled contours.
     *
     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
     * connected components from the binary image and label them:
     * INCLUDE: snippets/imgproc_drawContours.cpp
     *
     * @param image Destination image.
     * @param contours All the input contours. Each contour is stored as a point vector.
     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
     * @param color Color of the contours.
     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
     * thickness=#FILLED), the contour interiors are drawn.
     * @param lineType Line connectivity. See #LineTypes
     * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only
     * some of the contours (see maxLevel).
     * The {@code maxLevel} and {@code offset} parameters default to INT_MAX (draw all nesting levels)
     * and \((0,0)\) (no shift), respectively.
     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
     * even when no hierarchy data is provided. This is done by analyzing all the outlines together
     * using the even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
     * contours. To solve this problem, call #drawContours separately for each sub-group
     * of contours, or iterate over the collection using the contourIdx parameter.
9266     */
9267    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy) {
9268        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9269        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9270        drawContours_2(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj);
9271    }
9272
9273    /**
     * Draws contour outlines or filled contours.
     *
     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
     * connected components from the binary image and label them:
     * INCLUDE: snippets/imgproc_drawContours.cpp
     *
     * @param image Destination image.
     * @param contours All the input contours. Each contour is stored as a point vector.
     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
     * @param color Color of the contours.
     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
     * thickness=#FILLED), the contour interiors are drawn.
     * @param lineType Line connectivity. See #LineTypes
     * The {@code hierarchy}, {@code maxLevel}, and {@code offset} parameters take their default values:
     * no hierarchy, INT_MAX (draw all nesting levels), and \((0,0)\) (no shift).
     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
     * even when no hierarchy data is provided. This is done by analyzing all the outlines together
     * using the even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
     * contours. To solve this problem, call #drawContours separately for each sub-group
     * of contours, or iterate over the collection using the contourIdx parameter.
9298     */
9299    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType) {
9300        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9301        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9302        drawContours_3(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
9303    }
9304
9305    /**
     * Draws contour outlines or filled contours.
     *
     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
     * connected components from the binary image and label them:
     * INCLUDE: snippets/imgproc_drawContours.cpp
     *
     * @param image Destination image.
     * @param contours All the input contours. Each contour is stored as a point vector.
     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
     * @param color Color of the contours.
     * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,
     * thickness=#FILLED), the contour interiors are drawn.
     * The {@code lineType}, {@code hierarchy}, {@code maxLevel}, and {@code offset} parameters take their
     * default values: LINE_8, no hierarchy, INT_MAX (draw all nesting levels), and \((0,0)\) (no shift).
     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
     * even when no hierarchy data is provided. This is done by analyzing all the outlines together
     * using the even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
     * contours. To solve this problem, call #drawContours separately for each sub-group
     * of contours, or iterate over the collection using the contourIdx parameter.
9329     */
9330    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness) {
9331        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9332        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9333        drawContours_4(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
9334    }
9335
9336    /**
     * Draws contour outlines or filled contours.
     *
     * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area
     * bounded by the contours if \(\texttt{thickness}&lt;0\) . The example below shows how to retrieve
     * connected components from the binary image and label them:
     * INCLUDE: snippets/imgproc_drawContours.cpp
     *
     * @param image Destination image.
     * @param contours All the input contours. Each contour is stored as a point vector.
     * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
     * @param color Color of the contours.
     * The {@code thickness}, {@code lineType}, {@code hierarchy}, {@code maxLevel}, and {@code offset}
     * parameters take their default values: 1, LINE_8, no hierarchy, INT_MAX, and \((0,0)\) (no shift).
     * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly
     * even when no hierarchy data is provided. This is done by analyzing all the outlines together
     * using the even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved
     * contours. To solve this problem, call #drawContours separately for each sub-group
     * of contours, or iterate over the collection using the contourIdx parameter.
9359     */
9360    public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color) {
9361        List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0);
9362        Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm);
9363        drawContours_5(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3]);
9364    }
9365
9366
9367    //
9368    // C++:  bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2)
9369    //
9370
9371    /**
     * Clips the line against the image rectangle.
     *
     * The function cv::clipLine calculates the part of the line segment that lies entirely within the
     * specified rectangle and updates pt1 and pt2 in place with the clipped endpoints.
     *
     * @param imgRect Image rectangle.
     * @param pt1 First line point.
     * @param pt2 Second line point.
     * @return false if the line segment is completely outside the rectangle, true otherwise
9377     */
9378    public static boolean clipLine(Rect imgRect, Point pt1, Point pt2) {
9379        double[] pt1_out = new double[2];
9380        double[] pt2_out = new double[2];
9381        boolean retVal = clipLine_0(imgRect.x, imgRect.y, imgRect.width, imgRect.height, pt1.x, pt1.y, pt1_out, pt2.x, pt2.y, pt2_out);
        pt1.x = pt1_out[0]; pt1.y = pt1_out[1];
        pt2.x = pt2_out[0]; pt2.y = pt2_out[1];
9384        return retVal;
9385    }
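
    // Usage sketch (illustrative, not part of the generated API): clips a segment
    // against a 640x480 image rectangle before drawing, so the endpoints passed to
    // line() are guaranteed to lie inside the image. Coordinates are example values.
    private static void clipLineExample(Mat canvas) {
        Point p1 = new Point(-100, 50);   // starts outside the rectangle
        Point p2 = new Point(800, 400);   // ends outside the rectangle
        if (clipLine(new Rect(0, 0, 640, 480), p1, p2)) {
            // p1 and p2 now hold the clipped endpoints.
            line(canvas, p1, p2, new Scalar(255, 255, 255), 1);
        }
    }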
9386
9387
9388    //
9389    // C++:  void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
9390    //
9391
9392    /**
9393     * Approximates an elliptic arc with a polyline.
9394     *
9395     * The function ellipse2Poly computes the vertices of a polyline that approximates the specified
9396     * elliptic arc. It is used by #ellipse. If {@code arcStart} is greater than {@code arcEnd}, they are swapped.
9397     *
9398     * @param center Center of the arc.
9399     * @param axes Half of the size of the ellipse main axes. See #ellipse for details.
9400     * @param angle Rotation angle of the ellipse in degrees. See #ellipse for details.
9401     * @param arcStart Starting angle of the elliptic arc in degrees.
9402     * @param arcEnd Ending angle of the elliptic arc in degrees.
9403     * @param delta Angle between the subsequent polyline vertices. It defines the approximation
9404     * accuracy.
9405     * @param pts Output vector of polyline vertices.
9406     */
9407    public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts) {
9408        Mat pts_mat = pts;
9409        ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj);
9410    }
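
    // Usage sketch (illustrative, not part of the generated API): approximates a
    // 0-180 degree elliptic arc with one vertex every 5 degrees and draws it as an
    // open polyline. All geometry values are example assumptions.
    private static void ellipse2PolyExample(Mat canvas) {
        MatOfPoint arc = new MatOfPoint();
        ellipse2Poly(new Point(200, 200), new Size(120, 60), 0, 0, 180, 5, arc);
        List<MatOfPoint> pts = new ArrayList<MatOfPoint>();
        pts.add(arc);
        polylines(canvas, pts, false, new Scalar(255, 0, 0));
    }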
9411
9412
9413    //
9414    // C++:  void cv::putText(Mat& img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false)
9415    //
9416
9417    /**
9418     * Draws a text string.
9419     *
9420     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9421     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9422     * example.
9423     *
9424     * @param img Image.
9425     * @param text Text string to be drawn.
9426     * @param org Bottom-left corner of the text string in the image.
9427     * @param fontFace Font type, see #HersheyFonts.
9428     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9429     * @param color Text color.
     * @param thickness Thickness of the lines used to draw the text.
9431     * @param lineType Line type. See #LineTypes
9432     * @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise,
9433     * it is at the top-left corner.
9434     */
9435    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin) {
9436        putText_0(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, bottomLeftOrigin);
9437    }
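
    // Usage sketch (illustrative, not part of the generated API): renders a label
    // whose org is the bottom-left corner of the text. The font, scale, and color
    // are example assumptions.
    private static void putTextExample(Mat canvas) {
        putText(canvas, "hello", new Point(10, 40), FONT_HERSHEY_SIMPLEX, 1.0,
                new Scalar(255, 255, 255), 2, LINE_AA, false);
    }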
9438
9439    /**
9440     * Draws a text string.
9441     *
9442     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9443     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9444     * example.
9445     *
9446     * @param img Image.
9447     * @param text Text string to be drawn.
9448     * @param org Bottom-left corner of the text string in the image.
9449     * @param fontFace Font type, see #HersheyFonts.
9450     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9451     * @param color Text color.
     * @param thickness Thickness of the lines used to draw the text.
     * @param lineType Line type. See #LineTypes
     * The {@code bottomLeftOrigin} parameter defaults to false, so the image data origin is at the
     * top-left corner.
9455     */
9456    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType) {
9457        putText_1(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType);
9458    }
9459
9460    /**
9461     * Draws a text string.
9462     *
9463     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9464     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9465     * example.
9466     *
9467     * @param img Image.
9468     * @param text Text string to be drawn.
9469     * @param org Bottom-left corner of the text string in the image.
9470     * @param fontFace Font type, see #HersheyFonts.
9471     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9472     * @param color Text color.
     * @param thickness Thickness of the lines used to draw the text.
     * The {@code lineType} and {@code bottomLeftOrigin} parameters default to LINE_8 and false, so the
     * image data origin is at the top-left corner.
9475     */
9476    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness) {
9477        putText_2(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness);
9478    }
9479
9480    /**
9481     * Draws a text string.
9482     *
9483     * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered
9484     * using the specified font are replaced by question marks. See #getTextSize for a text rendering code
9485     * example.
9486     *
9487     * @param img Image.
9488     * @param text Text string to be drawn.
9489     * @param org Bottom-left corner of the text string in the image.
9490     * @param fontFace Font type, see #HersheyFonts.
9491     * @param fontScale Font scale factor that is multiplied by the font-specific base size.
9492     * @param color Text color.
     * The {@code thickness}, {@code lineType}, and {@code bottomLeftOrigin} parameters default to 1,
     * LINE_8, and false, so the image data origin is at the top-left corner.
9494     */
9495    public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color) {
9496        putText_3(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3]);
9497    }
9498
9499
9500    //
9501    // C++:  double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1)
9502    //
9503
9504    /**
9505     * Calculates the font-specific size to use to achieve a given height in pixels.
9506     *
9507     * @param fontFace Font to use, see cv::HersheyFonts.
     * @param pixelHeight Pixel height to compute the fontScale for.
     * @param thickness Thickness of lines used to render the text. See putText for details.
9510     * @return The fontSize to use for cv::putText
9511     *
9512     * SEE: cv::putText
9513     */
9514    public static double getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness) {
9515        return getFontScaleFromHeight_0(fontFace, pixelHeight, thickness);
9516    }
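
    // Usage sketch (illustrative, not part of the generated API): computes the
    // fontScale that yields roughly 20-pixel-tall glyphs and reuses the same
    // thickness in putText, as the documentation above suggests.
    private static void fontScaleExample(Mat canvas) {
        int thickness = 2;
        double scale = getFontScaleFromHeight(FONT_HERSHEY_SIMPLEX, 20, thickness);
        putText(canvas, "20 px", new Point(10, 30), FONT_HERSHEY_SIMPLEX, scale,
                new Scalar(0, 255, 0), thickness);
    }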
9517
9518    /**
9519     * Calculates the font-specific size to use to achieve a given height in pixels.
9520     *
9521     * @param fontFace Font to use, see cv::HersheyFonts.
     * @param pixelHeight Pixel height to compute the fontScale for.
9523     * @return The fontSize to use for cv::putText
9524     *
9525     * SEE: cv::putText
9526     */
9527    public static double getFontScaleFromHeight(int fontFace, int pixelHeight) {
9528        return getFontScaleFromHeight_1(fontFace, pixelHeight);
9529    }
9530
9531
9532    //
9533    // C++:  void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
9534    //
9535
9536    /**
     * Finds lines in a binary image using the standard Hough transform and returns the accumulator values.
     *
     * <b>Note:</b> This function is for bindings use only. Use the original function in C++ code.
9540     *
9541     * SEE: HoughLines
9542     * @param image automatically generated
9543     * @param lines automatically generated
9544     * @param rho automatically generated
9545     * @param theta automatically generated
9546     * @param threshold automatically generated
9547     * @param srn automatically generated
9548     * @param stn automatically generated
9549     * @param min_theta automatically generated
9550     * @param max_theta automatically generated
9551     */
9552    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta) {
9553        HoughLinesWithAccumulator_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta, max_theta);
9554    }
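
    // Usage sketch (illustrative, not part of the generated API): runs the
    // accumulator-returning Hough variant on a Canny edge map. Each output row is
    // assumed to hold (rho, theta, votes), i.e. the #HoughLines layout plus the
    // accumulator value; the thresholds are example values only.
    private static void houghWithAccumulatorExample(Mat gray) {
        Mat edges = new Mat();
        Canny(gray, edges, 50, 150);
        Mat lines = new Mat();
        HoughLinesWithAccumulator(edges, lines, 1, Math.PI / 180, 100);
        for (int i = 0; i < lines.rows(); i++) {
            double[] v = lines.get(i, 0); // {rho, theta, votes} -- assumed layout
            System.out.println("rho=" + v[0] + " theta=" + v[1] + " votes=" + v[2]);
        }
    }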
9555
9556    /**
     * Finds lines in a binary image using the standard Hough transform and returns the accumulator values.
     *
     * <b>Note:</b> This function is for bindings use only. Use the original function in C++ code.
9560     *
9561     * SEE: HoughLines
9562     * @param image automatically generated
9563     * @param lines automatically generated
9564     * @param rho automatically generated
9565     * @param theta automatically generated
9566     * @param threshold automatically generated
9567     * @param srn automatically generated
9568     * @param stn automatically generated
9569     * @param min_theta automatically generated
9570     */
9571    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta) {
9572        HoughLinesWithAccumulator_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta);
9573    }
9574
9575    /**
     * Finds lines in a binary image using the standard Hough transform and returns the accumulator values.
     *
     * <b>Note:</b> This function is for bindings use only. Use the original function in C++ code.
9579     *
9580     * SEE: HoughLines
9581     * @param image automatically generated
9582     * @param lines automatically generated
9583     * @param rho automatically generated
9584     * @param theta automatically generated
9585     * @param threshold automatically generated
9586     * @param srn automatically generated
9587     * @param stn automatically generated
9588     */
9589    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) {
9590        HoughLinesWithAccumulator_2(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn);
9591    }
9592
9593    /**
     * Finds lines in a binary image using the standard Hough transform and returns the accumulator values.
     *
     * <b>Note:</b> This function is for bindings use only. Use the original function in C++ code.
9597     *
9598     * SEE: HoughLines
9599     * @param image automatically generated
9600     * @param lines automatically generated
9601     * @param rho automatically generated
9602     * @param theta automatically generated
9603     * @param threshold automatically generated
9604     * @param srn automatically generated
9605     */
9606    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn) {
9607        HoughLinesWithAccumulator_3(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn);
9608    }
9609
9610    /**
     * Finds lines in a binary image using the standard Hough transform and returns the accumulator values.
     *
     * <b>Note:</b> This function is for bindings use only. Use the original function in C++ code.
9614     *
9615     * SEE: HoughLines
9616     * @param image automatically generated
9617     * @param lines automatically generated
9618     * @param rho automatically generated
9619     * @param theta automatically generated
9620     * @param threshold automatically generated
9621     */
9622    public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold) {
9623        HoughLinesWithAccumulator_4(image.nativeObj, lines.nativeObj, rho, theta, threshold);
9624    }
9625
9626
9627
    // C++:  Size cv::getTextSize(const String& text, int fontFace, double fontScale, int thickness, int* baseLine)
    //javadoc:getTextSize(text, fontFace, fontScale, thickness, baseLine)
    public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) {
        if (baseLine != null && baseLine.length != 1)
            throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'.");
        return new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine));
    }
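
    // Usage sketch (illustrative, not part of the generated API): measures a
    // string first, then draws it and underlines it at the baseline using the
    // baseLine output. Positions and colors are example values.
    private static void getTextSizeExample(Mat canvas) {
        int[] baseLine = new int[1];
        Size sz = getTextSize("label", FONT_HERSHEY_SIMPLEX, 1.0, 2, baseLine);
        Point textOrg = new Point(10, 10 + sz.height);
        putText(canvas, "label", textOrg, FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255), 2);
        line(canvas, new Point(textOrg.x, textOrg.y + baseLine[0]),
                new Point(textOrg.x + sz.width, textOrg.y + baseLine[0]), new Scalar(128, 128, 128), 1);
    }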
9636
9637
9638
9639
9640    // C++:  Ptr_LineSegmentDetector cv::createLineSegmentDetector(int refine = LSD_REFINE_STD, double scale = 0.8, double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5, double log_eps = 0, double density_th = 0.7, int n_bins = 1024)
9641    private static native long createLineSegmentDetector_0(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th, int n_bins);
9642    private static native long createLineSegmentDetector_1(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th);
9643    private static native long createLineSegmentDetector_2(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps);
9644    private static native long createLineSegmentDetector_3(int refine, double scale, double sigma_scale, double quant, double ang_th);
9645    private static native long createLineSegmentDetector_4(int refine, double scale, double sigma_scale, double quant);
9646    private static native long createLineSegmentDetector_5(int refine, double scale, double sigma_scale);
9647    private static native long createLineSegmentDetector_6(int refine, double scale);
9648    private static native long createLineSegmentDetector_7(int refine);
9649    private static native long createLineSegmentDetector_8();
9650
9651    // C++:  Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
9652    private static native long getGaussianKernel_0(int ksize, double sigma, int ktype);
9653    private static native long getGaussianKernel_1(int ksize, double sigma);
9654
9655    // C++:  void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
9656    private static native void getDerivKernels_0(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize, int ktype);
9657    private static native void getDerivKernels_1(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize);
9658    private static native void getDerivKernels_2(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize);
9659
9660    // C++:  Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
9661    private static native long getGaborKernel_0(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi, int ktype);
9662    private static native long getGaborKernel_1(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi);
9663    private static native long getGaborKernel_2(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma);
9664
9665    // C++:  Mat cv::getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1))
9666    private static native long getStructuringElement_0(int shape, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9667    private static native long getStructuringElement_1(int shape, double ksize_width, double ksize_height);
9668
9669    // C++:  void cv::medianBlur(Mat src, Mat& dst, int ksize)
9670    private static native void medianBlur_0(long src_nativeObj, long dst_nativeObj, int ksize);
9671
9672    // C++:  void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT)
9673    private static native void GaussianBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY, int borderType);
9674    private static native void GaussianBlur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY);
9675    private static native void GaussianBlur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX);
9676
9677    // C++:  void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
9678    private static native void bilateralFilter_0(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace, int borderType);
9679    private static native void bilateralFilter_1(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace);
9680
9681    // C++:  void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT)
9682    private static native void boxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType);
9683    private static native void boxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize);
9684    private static native void boxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9685    private static native void boxFilter_3(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height);
9686
9687    // C++:  void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, int borderType = BORDER_DEFAULT)
9688    private static native void sqrBoxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType);
9689    private static native void sqrBoxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize);
9690    private static native void sqrBoxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9691    private static native void sqrBoxFilter_3(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height);
9692
9693    // C++:  void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
9694    private static native void blur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y, int borderType);
9695    private static native void blur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
9696    private static native void blur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height);
9697
9698    // C++:  void cv::stackBlur(Mat src, Mat& dst, Size ksize)
9699    private static native void stackBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height);
9700
9701    // C++:  void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
9702    private static native void filter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta, int borderType);
9703    private static native void filter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta);
9704    private static native void filter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y);
9705    private static native void filter2D_3(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj);
9706
9707    // C++:  void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
9708    private static native void sepFilter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta, int borderType);
9709    private static native void sepFilter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta);
9710    private static native void sepFilter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y);
9711    private static native void sepFilter2D_3(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj);
9712
9713    // C++:  void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
9714    private static native void Sobel_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType);
9715    private static native void Sobel_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta);
9716    private static native void Sobel_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale);
9717    private static native void Sobel_3(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize);
9718    private static native void Sobel_4(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy);
9719
9720    // C++:  void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, int borderType = BORDER_DEFAULT)
9721    private static native void spatialGradient_0(long src_nativeObj, long dx_nativeObj, long dy_nativeObj, int ksize, int borderType);
9722    private static native void spatialGradient_1(long src_nativeObj, long dx_nativeObj, long dy_nativeObj, int ksize);
9723    private static native void spatialGradient_2(long src_nativeObj, long dx_nativeObj, long dy_nativeObj);
9724
9725    // C++:  void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
9726    private static native void Scharr_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta, int borderType);
9727    private static native void Scharr_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta);
9728    private static native void Scharr_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale);
9729    private static native void Scharr_3(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy);
9730
9731    // C++:  void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
9732    private static native void Laplacian_0(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta, int borderType);
9733    private static native void Laplacian_1(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta);
9734    private static native void Laplacian_2(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale);
9735    private static native void Laplacian_3(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize);
9736    private static native void Laplacian_4(long src_nativeObj, long dst_nativeObj, int ddepth);
9737
9738    // C++:  void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false)
9739    private static native void Canny_0(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize, boolean L2gradient);
9740    private static native void Canny_1(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize);
9741    private static native void Canny_2(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2);
9742
9743    // C++:  void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false)
9744    private static native void Canny_3(long dx_nativeObj, long dy_nativeObj, long edges_nativeObj, double threshold1, double threshold2, boolean L2gradient);
9745    private static native void Canny_4(long dx_nativeObj, long dy_nativeObj, long edges_nativeObj, double threshold1, double threshold2);
9746
9747    // C++:  void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT)
9748    private static native void cornerMinEigenVal_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType);
9749    private static native void cornerMinEigenVal_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize);
9750    private static native void cornerMinEigenVal_2(long src_nativeObj, long dst_nativeObj, int blockSize);
9751
9752    // C++:  void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT)
9753    private static native void cornerHarris_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k, int borderType);
9754    private static native void cornerHarris_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k);
9755
9756    // C++:  void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT)
9757    private static native void cornerEigenValsAndVecs_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType);
9758    private static native void cornerEigenValsAndVecs_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize);
9759
9760    // C++:  void cv::preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT)
9761    private static native void preCornerDetect_0(long src_nativeObj, long dst_nativeObj, int ksize, int borderType);
9762    private static native void preCornerDetect_1(long src_nativeObj, long dst_nativeObj, int ksize);
9763
9764    // C++:  void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria)
9765    private static native void cornerSubPix_0(long image_nativeObj, long corners_nativeObj, double winSize_width, double winSize_height, double zeroZone_width, double zeroZone_height, int criteria_type, int criteria_maxCount, double criteria_epsilon);
9766
9767    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04)
9768    private static native void goodFeaturesToTrack_0(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector, double k);
9769    private static native void goodFeaturesToTrack_1(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector);
9770    private static native void goodFeaturesToTrack_2(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize);
9771    private static native void goodFeaturesToTrack_3(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj);
9772    private static native void goodFeaturesToTrack_4(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance);
9773
9774    // C++:  void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04)
9775    private static native void goodFeaturesToTrack_5(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector, double k);
9776    private static native void goodFeaturesToTrack_6(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector);
9777    private static native void goodFeaturesToTrack_7(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize);
9778
9779    // C++:  void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04)
9780    private static native void goodFeaturesToTrackWithQuality_0(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector, double k);
9781    private static native void goodFeaturesToTrackWithQuality_1(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector);
9782    private static native void goodFeaturesToTrackWithQuality_2(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize);
9783    private static native void goodFeaturesToTrackWithQuality_3(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize);
9784    private static native void goodFeaturesToTrackWithQuality_4(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj);
9785
9786    // C++:  void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
9787    private static native void HoughLines_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta);
9788    private static native void HoughLines_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta);
9789    private static native void HoughLines_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn);
9790    private static native void HoughLines_3(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn);
9791    private static native void HoughLines_4(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
9792
9793    // C++:  void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0)
9794    private static native void HoughLinesP_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength, double maxLineGap);
9795    private static native void HoughLinesP_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength);
9796    private static native void HoughLinesP_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
9797
9798    // C++:  void cv::HoughLinesPointSet(Mat point, Mat& lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step)
9799    private static native void HoughLinesPointSet_0(long point_nativeObj, long lines_nativeObj, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step);
9800
9801    // C++:  void cv::HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
9802    private static native void HoughCircles_0(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius);
9803    private static native void HoughCircles_1(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius);
9804    private static native void HoughCircles_2(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2);
9805    private static native void HoughCircles_3(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1);
9806    private static native void HoughCircles_4(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist);
9807
9808    // C++:  void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
9809    private static native void erode_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9810    private static native void erode_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType);
9811    private static native void erode_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
9812    private static native void erode_3(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y);
9813    private static native void erode_4(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj);
9814
9815    // C++:  void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
9816    private static native void dilate_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9817    private static native void dilate_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType);
9818    private static native void dilate_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
9819    private static native void dilate_3(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y);
9820    private static native void dilate_4(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj);
9821
9822    // C++:  void cv::morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
9823    private static native void morphologyEx_0(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9824    private static native void morphologyEx_1(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType);
9825    private static native void morphologyEx_2(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
9826    private static native void morphologyEx_3(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y);
9827    private static native void morphologyEx_4(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj);
9828
9829    // C++:  void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
9830    private static native void resize_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy, int interpolation);
9831    private static native void resize_1(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy);
9832    private static native void resize_2(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx);
9833    private static native void resize_3(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height);
9834
9835    // C++:  void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
9836    private static native void warpAffine_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9837    private static native void warpAffine_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode);
9838    private static native void warpAffine_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags);
9839    private static native void warpAffine_3(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height);
9840
9841    // C++:  void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
9842    private static native void warpPerspective_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9843    private static native void warpPerspective_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode);
9844    private static native void warpPerspective_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags);
9845    private static native void warpPerspective_3(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height);
9846
9847    // C++:  void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
9848    private static native void remap_0(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
9849    private static native void remap_1(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode);
9850    private static native void remap_2(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation);
9851
    // C++:  void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
    private static native void convertMaps_0(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type, boolean nninterpolation);
    private static native void convertMaps_1(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type);

    // C++:  Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale)
    private static native long getRotationMatrix2D_0(double center_x, double center_y, double angle, double scale);

    // C++:  void cv::invertAffineTransform(Mat M, Mat& iM)
    private static native void invertAffineTransform_0(long M_nativeObj, long iM_nativeObj);

    // C++:  Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU)
    private static native long getPerspectiveTransform_0(long src_nativeObj, long dst_nativeObj, int solveMethod);
    private static native long getPerspectiveTransform_1(long src_nativeObj, long dst_nativeObj);

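    // Usage sketch (illustrative; not part of the generated bindings): map a
    // quadrilateral onto an axis-aligned rectangle with getPerspectiveTransform
    // and warpPerspective. The four corner coordinates are placeholders.
    private static Mat examplePerspective(Mat src) {
        MatOfPoint2f quad = new MatOfPoint2f(
                new Point(50, 40), new Point(570, 60),
                new Point(600, 420), new Point(30, 400));
        MatOfPoint2f rect = new MatOfPoint2f(
                new Point(0, 0), new Point(640, 0),
                new Point(640, 480), new Point(0, 480));
        Mat H = getPerspectiveTransform(quad, rect); // 3x3 homography
        Mat dst = new Mat();
        warpPerspective(src, dst, H, new Size(640, 480));
        return dst;
    }
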
    // C++:  Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst)
    private static native long getAffineTransform_0(long src_mat_nativeObj, long dst_mat_nativeObj);

    // C++:  void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
    private static native void getRectSubPix_0(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj, int patchType);
    private static native void getRectSubPix_1(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj);

    // C++:  void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags)
    private static native void logPolar_0(long src_nativeObj, long dst_nativeObj, double center_x, double center_y, double M, int flags);

    // C++:  void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags)
    private static native void linearPolar_0(long src_nativeObj, long dst_nativeObj, double center_x, double center_y, double maxRadius, int flags);

    // C++:  void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags)
    private static native void warpPolar_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double center_x, double center_y, double maxRadius, int flags);

    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1)
    private static native void integral3_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth, int sqdepth);
    private static native void integral3_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth);
    private static native void integral3_2(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj);

    // C++:  void cv::integral(Mat src, Mat& sum, int sdepth = -1)
    private static native void integral_0(long src_nativeObj, long sum_nativeObj, int sdepth);
    private static native void integral_1(long src_nativeObj, long sum_nativeObj);

    // C++:  void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1)
    private static native void integral2_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth, int sqdepth);
    private static native void integral2_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth);
    private static native void integral2_2(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj);

    // C++:  void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat())
    private static native void accumulate_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void accumulate_1(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
    private static native void accumulateSquare_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void accumulateSquare_1(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
    private static native void accumulateProduct_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
    private static native void accumulateProduct_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);

    // C++:  void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat())
    private static native void accumulateWeighted_0(long src_nativeObj, long dst_nativeObj, double alpha, long mask_nativeObj);
    private static native void accumulateWeighted_1(long src_nativeObj, long dst_nativeObj, double alpha);

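    // Usage sketch (illustrative; not part of the generated bindings):
    // exponential running average of a frame stream with accumulateWeighted,
    // a common simple background model. "avg" is assumed to be a CV_32F Mat
    // of the same size and channel count as the frames.
    private static void exampleRunningAverage(Mat frame, Mat avg) {
        Mat frame32 = new Mat();
        frame.convertTo(frame32, org.opencv.core.CvType.CV_32F);
        // avg = (1 - alpha) * avg + alpha * frame32, with alpha = 0.05 here.
        accumulateWeighted(frame32, avg, 0.05);
    }
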
    // C++:  Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0)
    private static native double[] phaseCorrelate_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj, double[] response_out);
    private static native double[] phaseCorrelate_1(long src1_nativeObj, long src2_nativeObj, long window_nativeObj);
    private static native double[] phaseCorrelate_2(long src1_nativeObj, long src2_nativeObj);

    // C++:  void cv::createHanningWindow(Mat& dst, Size winSize, int type)
    private static native void createHanningWindow_0(long dst_nativeObj, double winSize_width, double winSize_height, int type);

    // C++:  void cv::divSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false)
    private static native void divSpectrums_0(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags, boolean conjB);
    private static native void divSpectrums_1(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags);

    // C++:  double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, int type)
    private static native double threshold_0(long src_nativeObj, long dst_nativeObj, double thresh, double maxval, int type);

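    // Usage sketch (illustrative; not part of the generated bindings): Otsu
    // binarization. The input must be 8-bit single channel; the fixed 0
    // threshold is ignored when THRESH_OTSU is set, and the value Otsu's
    // method selects is returned.
    private static Mat exampleOtsu(Mat gray) {
        Mat bin = new Mat();
        double t = threshold(gray, bin, 0, 255, THRESH_BINARY + THRESH_OTSU);
        System.out.println("Otsu picked threshold " + t);
        return bin;
    }
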
    // C++:  void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
    private static native void adaptiveThreshold_0(long src_nativeObj, long dst_nativeObj, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C);

    // C++:  void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
    private static native void pyrDown_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType);
    private static native void pyrDown_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height);
    private static native void pyrDown_2(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
    private static native void pyrUp_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType);
    private static native void pyrUp_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height);
    private static native void pyrUp_2(long src_nativeObj, long dst_nativeObj);

    // C++:  void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false)
    private static native void calcHist_0(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj, boolean accumulate);
    private static native void calcHist_1(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj);

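    // Usage sketch (illustrative; not part of the generated bindings):
    // 256-bin grayscale histogram via the public calcHist wrapper, whose
    // vector arguments are passed as MatOf* containers. An empty mask Mat
    // means "use every pixel".
    private static Mat exampleHistogram(Mat gray) {
        Mat hist = new Mat();
        calcHist(java.util.Arrays.asList(gray), new MatOfInt(0), new Mat(),
                hist, new MatOfInt(256), new MatOfFloat(0f, 256f));
        return hist; // 256x1 CV_32F Mat of bin counts
    }
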
    // C++:  void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
    private static native void calcBackProject_0(long images_mat_nativeObj, long channels_mat_nativeObj, long hist_nativeObj, long dst_nativeObj, long ranges_mat_nativeObj, double scale);

    // C++:  double cv::compareHist(Mat H1, Mat H2, int method)
    private static native double compareHist_0(long H1_nativeObj, long H2_nativeObj, int method);

    // C++:  void cv::equalizeHist(Mat src, Mat& dst)
    private static native void equalizeHist_0(long src_nativeObj, long dst_nativeObj);

    // C++:  Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8))
    private static native long createCLAHE_0(double clipLimit, double tileGridSize_width, double tileGridSize_height);
    private static native long createCLAHE_1(double clipLimit);
    private static native long createCLAHE_2();

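    // Usage sketch (illustrative; not part of the generated bindings):
    // contrast-limited adaptive histogram equalization on an 8-bit grayscale
    // image. The CLAHE object returned by createCLAHE is reusable across frames.
    private static Mat exampleClahe(Mat gray) {
        CLAHE clahe = createCLAHE(2.0, new Size(8, 8));
        Mat out = new Mat();
        clahe.apply(gray, out);
        return out;
    }
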
    // C++:  float cv::wrapperEMD(Mat signature1, Mat signature2, int distType, Mat cost = Mat(), Ptr_float& lowerBound = Ptr<float>(), Mat& flow = Mat())
    private static native float EMD_0(long signature1_nativeObj, long signature2_nativeObj, int distType, long cost_nativeObj, long flow_nativeObj);
    private static native float EMD_1(long signature1_nativeObj, long signature2_nativeObj, int distType, long cost_nativeObj);
    private static native float EMD_3(long signature1_nativeObj, long signature2_nativeObj, int distType);

    // C++:  void cv::watershed(Mat image, Mat& markers)
    private static native void watershed_0(long image_nativeObj, long markers_nativeObj);

    // C++:  void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
    private static native void pyrMeanShiftFiltering_0(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel, int termcrit_type, int termcrit_maxCount, double termcrit_epsilon);
    private static native void pyrMeanShiftFiltering_1(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel);
    private static native void pyrMeanShiftFiltering_2(long src_nativeObj, long dst_nativeObj, double sp, double sr);

    // C++:  void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
    private static native void grabCut_0(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount, int mode);
    private static native void grabCut_1(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount);

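    // Usage sketch (illustrative; not part of the generated bindings):
    // grabCut segmentation initialized from a rectangle around the object.
    // The model mats start empty and are filled by the call.
    private static Mat exampleGrabCut(Mat bgr, Rect roi) {
        Mat mask = new Mat(), bgd = new Mat(), fgd = new Mat();
        grabCut(bgr, mask, roi, bgd, fgd, 5, GC_INIT_WITH_RECT);
        return mask; // per-pixel labels: GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD
    }
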
    // C++:  void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP)
    private static native void distanceTransformWithLabels_0(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize, int labelType);
    private static native void distanceTransformWithLabels_1(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize);

    // C++:  void cv::distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize, int dstType = CV_32F)
    private static native void distanceTransform_0(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize, int dstType);
    private static native void distanceTransform_1(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize);

    // C++:  int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
    private static native int floodFill_0(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3, int flags);
    private static native int floodFill_1(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3);
    private static native int floodFill_2(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3);
    private static native int floodFill_3(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out);
    private static native int floodFill_4(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3);

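    // Usage sketch (illustrative; not part of the generated bindings): flood
    // fill from a fixed seed point with a small color tolerance. The mask
    // must be 8-bit, two pixels wider and taller than the image; "rect"
    // receives the bounding box of the repainted region.
    private static int exampleFloodFill(Mat img) {
        Mat mask = Mat.zeros(img.rows() + 2, img.cols() + 2, org.opencv.core.CvType.CV_8UC1);
        Rect filled = new Rect();
        return floodFill(img, mask, new Point(10, 10), new Scalar(0, 0, 255),
                filled, new Scalar(5, 5, 5), new Scalar(5, 5, 5), 4 /* connectivity */);
    }
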
    // C++:  void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst)
    private static native void blendLinear_0(long src1_nativeObj, long src2_nativeObj, long weights1_nativeObj, long weights2_nativeObj, long dst_nativeObj);

    // C++:  void cv::cvtColor(Mat src, Mat& dst, int code, int dstCn = 0)
    private static native void cvtColor_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn);
    private static native void cvtColor_1(long src_nativeObj, long dst_nativeObj, int code);

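    // Usage sketch (illustrative; not part of the generated bindings): BGR to
    // grayscale. COLOR_BGR2GRAY is the usual code for images loaded by
    // OpenCV's imread, which stores channels in BGR order.
    private static Mat exampleGray(Mat bgr) {
        Mat gray = new Mat();
        cvtColor(bgr, gray, COLOR_BGR2GRAY);
        return gray;
    }
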
    // C++:  void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code)
    private static native void cvtColorTwoPlane_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int code);

    // C++:  void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0)
    private static native void demosaicing_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn);
    private static native void demosaicing_1(long src_nativeObj, long dst_nativeObj, int code);

    // C++:  Moments cv::moments(Mat array, bool binaryImage = false)
    private static native double[] moments_0(long array_nativeObj, boolean binaryImage);
    private static native double[] moments_1(long array_nativeObj);

    // C++:  void cv::HuMoments(Moments m, Mat& hu)
    private static native void HuMoments_0(double m_m00, double m_m10, double m_m01, double m_m20, double m_m11, double m_m02, double m_m30, double m_m21, double m_m12, double m_m03, long hu_nativeObj);

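    // Usage sketch (illustrative; not part of the generated bindings):
    // centroid of a contour from its spatial moments, assuming the public
    // m00/m10/m01 fields on the org.opencv.imgproc.Moments wrapper.
    private static Point exampleCentroid(MatOfPoint contour) {
        Moments m = moments(contour);
        if (m.m00 == 0) return new Point(0, 0); // degenerate (zero-area) contour
        return new Point(m.m10 / m.m00, m.m01 / m.m00);
    }
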
    // C++:  void cv::matchTemplate(Mat image, Mat templ, Mat& result, int method, Mat mask = Mat())
    private static native void matchTemplate_0(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method, long mask_nativeObj);
    private static native void matchTemplate_1(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method);

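    // Usage sketch (illustrative; not part of the generated bindings):
    // normalized cross-correlation template matching. For TM_CCOEFF_NORMED
    // the best match is the maximum of the result map; Core is fully
    // qualified because this file does not import it.
    private static Point exampleMatch(Mat image, Mat templ) {
        Mat result = new Mat();
        matchTemplate(image, templ, result, TM_CCOEFF_NORMED);
        org.opencv.core.Core.MinMaxLocResult r = org.opencv.core.Core.minMaxLoc(result);
        return r.maxLoc; // top-left corner of the best match
    }
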
    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype)
    private static native int connectedComponentsWithAlgorithm_0(long image_nativeObj, long labels_nativeObj, int connectivity, int ltype, int ccltype);

    // C++:  int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S)
    private static native int connectedComponents_0(long image_nativeObj, long labels_nativeObj, int connectivity, int ltype);
    private static native int connectedComponents_1(long image_nativeObj, long labels_nativeObj, int connectivity);
    private static native int connectedComponents_2(long image_nativeObj, long labels_nativeObj);

    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, int ccltype)
    private static native int connectedComponentsWithStatsWithAlgorithm_0(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity, int ltype, int ccltype);

    // C++:  int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S)
    private static native int connectedComponentsWithStats_0(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity, int ltype);
    private static native int connectedComponentsWithStats_1(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity);
    private static native int connectedComponentsWithStats_2(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj);

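    // Usage sketch (illustrative; not part of the generated bindings): label
    // a binary image and read per-component statistics. Row i of "stats"
    // holds CC_STAT_LEFT/TOP/WIDTH/HEIGHT/AREA for label i; label 0 is the
    // background.
    private static void exampleComponents(Mat binary) {
        Mat labels = new Mat(), stats = new Mat(), centroids = new Mat();
        int n = connectedComponentsWithStats(binary, labels, stats, centroids);
        for (int i = 1; i < n; i++) { // skip background label 0
            double area = stats.get(i, CC_STAT_AREA)[0];
            System.out.println("component " + i + " area=" + area);
        }
    }
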
    // C++:  void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point())
    private static native void findContours_0(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method, double offset_x, double offset_y);
    private static native void findContours_1(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method);

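    // Usage sketch (illustrative; not part of the generated bindings): find
    // the external contours of a binary image and draw them all. Older
    // releases of findContours modify the input, so pass a clone if the
    // source must survive.
    private static void exampleContours(Mat binary, Mat canvas) {
        List<MatOfPoint> contours = new ArrayList<>();
        Mat hierarchy = new Mat();
        findContours(binary, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
        drawContours(canvas, contours, -1, new Scalar(0, 255, 0), 2); // -1 = all contours
    }
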
    // C++:  void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
    private static native void approxPolyDP_0(long curve_mat_nativeObj, long approxCurve_mat_nativeObj, double epsilon, boolean closed);

    // C++:  double cv::arcLength(vector_Point2f curve, bool closed)
    private static native double arcLength_0(long curve_mat_nativeObj, boolean closed);

    // C++:  Rect cv::boundingRect(Mat array)
    private static native double[] boundingRect_0(long array_nativeObj);

    // C++:  double cv::contourArea(Mat contour, bool oriented = false)
    private static native double contourArea_0(long contour_nativeObj, boolean oriented);
    private static native double contourArea_1(long contour_nativeObj);

    // C++:  RotatedRect cv::minAreaRect(vector_Point2f points)
    private static native double[] minAreaRect_0(long points_mat_nativeObj);

    // C++:  void cv::boxPoints(RotatedRect box, Mat& points)
    private static native void boxPoints_0(double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, long points_nativeObj);

    // C++:  void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius)
    private static native void minEnclosingCircle_0(long points_mat_nativeObj, double[] center_out, double[] radius_out);

    // C++:  double cv::minEnclosingTriangle(Mat points, Mat& triangle)
    private static native double minEnclosingTriangle_0(long points_nativeObj, long triangle_nativeObj);

    // C++:  double cv::matchShapes(Mat contour1, Mat contour2, int method, double parameter)
    private static native double matchShapes_0(long contour1_nativeObj, long contour2_nativeObj, int method, double parameter);

    // C++:  void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false,  _hidden_  returnPoints = true)
    private static native void convexHull_0(long points_mat_nativeObj, long hull_mat_nativeObj, boolean clockwise);
    private static native void convexHull_2(long points_mat_nativeObj, long hull_mat_nativeObj);

    // C++:  void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
    private static native void convexityDefects_0(long contour_mat_nativeObj, long convexhull_mat_nativeObj, long convexityDefects_mat_nativeObj);

    // C++:  bool cv::isContourConvex(vector_Point contour)
    private static native boolean isContourConvex_0(long contour_mat_nativeObj);

    // C++:  float cv::intersectConvexConvex(Mat p1, Mat p2, Mat& p12, bool handleNested = true)
    private static native float intersectConvexConvex_0(long p1_nativeObj, long p2_nativeObj, long p12_nativeObj, boolean handleNested);
    private static native float intersectConvexConvex_1(long p1_nativeObj, long p2_nativeObj, long p12_nativeObj);

    // C++:  RotatedRect cv::fitEllipse(vector_Point2f points)
    private static native double[] fitEllipse_0(long points_mat_nativeObj);

    // C++:  RotatedRect cv::fitEllipseAMS(Mat points)
    private static native double[] fitEllipseAMS_0(long points_nativeObj);

    // C++:  RotatedRect cv::fitEllipseDirect(Mat points)
    private static native double[] fitEllipseDirect_0(long points_nativeObj);

    // C++:  void cv::fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps)
    private static native void fitLine_0(long points_nativeObj, long line_nativeObj, int distType, double param, double reps, double aeps);

    // C++:  double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist)
    private static native double pointPolygonTest_0(long contour_mat_nativeObj, double pt_x, double pt_y, boolean measureDist);

    // C++:  int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion)
    private static native int rotatedRectangleIntersection_0(double rect1_center_x, double rect1_center_y, double rect1_size_width, double rect1_size_height, double rect1_angle, double rect2_center_x, double rect2_center_y, double rect2_size_width, double rect2_size_height, double rect2_angle, long intersectingRegion_nativeObj);

    // C++:  Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard()
    private static native long createGeneralizedHoughBallard_0();

    // C++:  Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil()
    private static native long createGeneralizedHoughGuil_0();

    // C++:  void cv::applyColorMap(Mat src, Mat& dst, int colormap)
    private static native void applyColorMap_0(long src_nativeObj, long dst_nativeObj, int colormap);

    // C++:  void cv::applyColorMap(Mat src, Mat& dst, Mat userColor)
    private static native void applyColorMap_1(long src_nativeObj, long dst_nativeObj, long userColor_nativeObj);

    // C++:  void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    private static native void line_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void line_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void line_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void line_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int line_type = 8, int shift = 0, double tipLength = 0.1)
    private static native void arrowedLine_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type, int shift, double tipLength);
    private static native void arrowedLine_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type, int shift);
    private static native void arrowedLine_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type);
    private static native void arrowedLine_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void arrowedLine_4(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    private static native void rectangle_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void rectangle_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void rectangle_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void rectangle_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    private static native void rectangle_4(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void rectangle_5(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void rectangle_6(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void rectangle_7(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    private static native void circle_0(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void circle_1(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void circle_2(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void circle_3(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    private static native void ellipse_0(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void ellipse_1(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void ellipse_2(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void ellipse_3(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = LINE_8)
    private static native void ellipse_4(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void ellipse_5(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void ellipse_6(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::drawMarker(Mat& img, Point position, Scalar color, int markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, int line_type = 8)
    private static native void drawMarker_0(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize, int thickness, int line_type);
    private static native void drawMarker_1(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize, int thickness);
    private static native void drawMarker_2(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize);
    private static native void drawMarker_3(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType);
    private static native void drawMarker_4(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = LINE_8, int shift = 0)
    private static native void fillConvexPoly_0(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift);
    private static native void fillConvexPoly_1(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType);
    private static native void fillConvexPoly_2(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = LINE_8, int shift = 0, Point offset = Point())
    private static native void fillPoly_0(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift, double offset_x, double offset_y);
    private static native void fillPoly_1(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift);
    private static native void fillPoly_2(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType);
    private static native void fillPoly_3(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0)
    private static native void polylines_0(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift);
    private static native void polylines_1(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void polylines_2(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void polylines_3(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
    private static native void drawContours_0(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel, double offset_x, double offset_y);
    private static native void drawContours_1(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel);
    private static native void drawContours_2(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj);
    private static native void drawContours_3(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void drawContours_4(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void drawContours_5(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3);

    // C++:  bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2)
    private static native boolean clipLine_0(int imgRect_x, int imgRect_y, int imgRect_width, int imgRect_height, double pt1_x, double pt1_y, double[] pt1_out, double pt2_x, double pt2_y, double[] pt2_out);

    // C++:  void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts)
    private static native void ellipse2Poly_0(double center_x, double center_y, double axes_width, double axes_height, int angle, int arcStart, int arcEnd, int delta, long pts_mat_nativeObj);

    // C++:  void cv::putText(Mat& img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false)
    private static native void putText_0(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, boolean bottomLeftOrigin);
    private static native void putText_1(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType);
    private static native void putText_2(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
    private static native void putText_3(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3);

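    // Usage sketch (illustrative; not part of the generated bindings): draw
    // a white label near the bottom-left of an image. "org" is the baseline
    // start of the text; FONT_HERSHEY_SIMPLEX is the common default face.
    private static void exampleLabel(Mat img, String text) {
        putText(img, text, new Point(10, img.rows() - 10),
                FONT_HERSHEY_SIMPLEX, 0.8, new Scalar(255, 255, 255), 2);
    }
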
    // C++:  double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1)
    private static native double getFontScaleFromHeight_0(int fontFace, int pixelHeight, int thickness);
    private static native double getFontScaleFromHeight_1(int fontFace, int pixelHeight);

    // C++:  void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI)
    private static native void HoughLinesWithAccumulator_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta);
    private static native void HoughLinesWithAccumulator_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta);
    private static native void HoughLinesWithAccumulator_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn);
    private static native void HoughLinesWithAccumulator_3(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn);
    private static native void HoughLinesWithAccumulator_4(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);

    // C++:  Size cv::getTextSize(String text, int fontFace, double fontScale, int thickness, int* baseLine)
    private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine);

}