001//
002// This file is auto-generated. Please don't modify it!
003//
004package org.opencv.calib3d;
005
006import java.util.ArrayList;
007import java.util.List;
008import org.opencv.calib3d.UsacParams;
009import org.opencv.core.Mat;
010import org.opencv.core.MatOfDouble;
011import org.opencv.core.MatOfPoint2f;
012import org.opencv.core.MatOfPoint3f;
013import org.opencv.core.Point;
014import org.opencv.core.Rect;
015import org.opencv.core.Scalar;
016import org.opencv.core.Size;
017import org.opencv.core.TermCriteria;
018import org.opencv.utils.Converters;
019
020// C++: class Calib3d
021
022public class Calib3d {
023
024    // C++: enum <unnamed>
025    public static final int
026            CV_ITERATIVE = 0,
027            CV_EPNP = 1,
028            CV_P3P = 2,
029            CV_DLS = 3,
030            CvLevMarq_DONE = 0,
031            CvLevMarq_STARTED = 1,
032            CvLevMarq_CALC_J = 2,
033            CvLevMarq_CHECK_ERR = 3,
034            LMEDS = 4,
035            RANSAC = 8,
036            RHO = 16,
037            USAC_DEFAULT = 32,
038            USAC_PARALLEL = 33,
039            USAC_FM_8PTS = 34,
040            USAC_FAST = 35,
041            USAC_ACCURATE = 36,
042            USAC_PROSAC = 37,
043            USAC_MAGSAC = 38,
044            CALIB_CB_ADAPTIVE_THRESH = 1,
045            CALIB_CB_NORMALIZE_IMAGE = 2,
046            CALIB_CB_FILTER_QUADS = 4,
047            CALIB_CB_FAST_CHECK = 8,
048            CALIB_CB_EXHAUSTIVE = 16,
049            CALIB_CB_ACCURACY = 32,
050            CALIB_CB_LARGER = 64,
051            CALIB_CB_MARKER = 128,
052            CALIB_CB_SYMMETRIC_GRID = 1,
053            CALIB_CB_ASYMMETRIC_GRID = 2,
054            CALIB_CB_CLUSTERING = 4,
055            CALIB_NINTRINSIC = 18,
056            CALIB_USE_INTRINSIC_GUESS = 0x00001,
057            CALIB_FIX_ASPECT_RATIO = 0x00002,
058            CALIB_FIX_PRINCIPAL_POINT = 0x00004,
059            CALIB_ZERO_TANGENT_DIST = 0x00008,
060            CALIB_FIX_FOCAL_LENGTH = 0x00010,
061            CALIB_FIX_K1 = 0x00020,
062            CALIB_FIX_K2 = 0x00040,
063            CALIB_FIX_K3 = 0x00080,
064            CALIB_FIX_K4 = 0x00800,
065            CALIB_FIX_K5 = 0x01000,
066            CALIB_FIX_K6 = 0x02000,
067            CALIB_RATIONAL_MODEL = 0x04000,
068            CALIB_THIN_PRISM_MODEL = 0x08000,
069            CALIB_FIX_S1_S2_S3_S4 = 0x10000,
070            CALIB_TILTED_MODEL = 0x40000,
071            CALIB_FIX_TAUX_TAUY = 0x80000,
072            CALIB_USE_QR = 0x100000,
073            CALIB_FIX_TANGENT_DIST = 0x200000,
074            CALIB_FIX_INTRINSIC = 0x00100,
075            CALIB_SAME_FOCAL_LENGTH = 0x00200,
076            CALIB_ZERO_DISPARITY = 0x00400,
077            CALIB_USE_LU = (1 << 17),
078            CALIB_USE_EXTRINSIC_GUESS = (1 << 22),
079            FM_7POINT = 1,
080            FM_8POINT = 2,
081            FM_LMEDS = 4,
082            FM_RANSAC = 8,
083            fisheye_CALIB_USE_INTRINSIC_GUESS = 1 << 0,
084            fisheye_CALIB_RECOMPUTE_EXTRINSIC = 1 << 1,
085            fisheye_CALIB_CHECK_COND = 1 << 2,
086            fisheye_CALIB_FIX_SKEW = 1 << 3,
087            fisheye_CALIB_FIX_K1 = 1 << 4,
088            fisheye_CALIB_FIX_K2 = 1 << 5,
089            fisheye_CALIB_FIX_K3 = 1 << 6,
090            fisheye_CALIB_FIX_K4 = 1 << 7,
091            fisheye_CALIB_FIX_INTRINSIC = 1 << 8,
092            fisheye_CALIB_FIX_PRINCIPAL_POINT = 1 << 9,
093            fisheye_CALIB_ZERO_DISPARITY = 1 << 10,
094            fisheye_CALIB_FIX_FOCAL_LENGTH = 1 << 11;
095
096
097    // C++: enum GridType (cv.CirclesGridFinderParameters.GridType)
098    public static final int
099            CirclesGridFinderParameters_SYMMETRIC_GRID = 0,
100            CirclesGridFinderParameters_ASYMMETRIC_GRID = 1;
101
102
103    // C++: enum HandEyeCalibrationMethod (cv.HandEyeCalibrationMethod)
104    public static final int
105            CALIB_HAND_EYE_TSAI = 0,
106            CALIB_HAND_EYE_PARK = 1,
107            CALIB_HAND_EYE_HORAUD = 2,
108            CALIB_HAND_EYE_ANDREFF = 3,
109            CALIB_HAND_EYE_DANIILIDIS = 4;
110
111
112    // C++: enum LocalOptimMethod (cv.LocalOptimMethod)
113    public static final int
114            LOCAL_OPTIM_NULL = 0,
115            LOCAL_OPTIM_INNER_LO = 1,
116            LOCAL_OPTIM_INNER_AND_ITER_LO = 2,
117            LOCAL_OPTIM_GC = 3,
118            LOCAL_OPTIM_SIGMA = 4;
119
120
121    // C++: enum NeighborSearchMethod (cv.NeighborSearchMethod)
122    public static final int
123            NEIGH_FLANN_KNN = 0,
124            NEIGH_GRID = 1,
125            NEIGH_FLANN_RADIUS = 2;
126
127
128    // C++: enum PolishingMethod (cv.PolishingMethod)
129    public static final int
130            NONE_POLISHER = 0,
131            LSQ_POLISHER = 1,
132            MAGSAC = 2,
133            COV_POLISHER = 3;
134
135
136    // C++: enum RobotWorldHandEyeCalibrationMethod (cv.RobotWorldHandEyeCalibrationMethod)
137    public static final int
138            CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0,
139            CALIB_ROBOT_WORLD_HAND_EYE_LI = 1;
140
141
142    // C++: enum SamplingMethod (cv.SamplingMethod)
143    public static final int
144            SAMPLING_UNIFORM = 0,
145            SAMPLING_PROGRESSIVE_NAPSAC = 1,
146            SAMPLING_NAPSAC = 2,
147            SAMPLING_PROSAC = 3;
148
149
150    // C++: enum ScoreMethod (cv.ScoreMethod)
151    public static final int
152            SCORE_METHOD_RANSAC = 0,
153            SCORE_METHOD_MSAC = 1,
154            SCORE_METHOD_MAGSAC = 2,
155            SCORE_METHOD_LMEDS = 3;
156
157
158    // C++: enum SolvePnPMethod (cv.SolvePnPMethod)
159    public static final int
160            SOLVEPNP_ITERATIVE = 0,
161            SOLVEPNP_EPNP = 1,
162            SOLVEPNP_P3P = 2,
163            SOLVEPNP_DLS = 3,
164            SOLVEPNP_UPNP = 4,
165            SOLVEPNP_AP3P = 5,
166            SOLVEPNP_IPPE = 6,
167            SOLVEPNP_IPPE_SQUARE = 7,
168            SOLVEPNP_SQPNP = 8,
169            SOLVEPNP_MAX_COUNT = 8+1;
170
171
172    // C++: enum UndistortTypes (cv.UndistortTypes)
173    public static final int
174            PROJ_SPHERICAL_ORTHO = 0,
175            PROJ_SPHERICAL_EQRECT = 1;
176
177
178    //
179    // C++:  void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
180    //
181
182    /**
183     * Converts a rotation matrix to a rotation vector or vice versa.
184     *
185     * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
186     * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
187     * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
188     * derivatives of the output array components with respect to the input array components.
189     *
190     * \(\begin{array}{l} \theta \leftarrow norm(r) \\ r  \leftarrow r/ \theta \\ R =  \cos(\theta) I + (1- \cos{\theta} ) r r^T +  \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
191     *
192     * Inverse transformation can be also done easily, since
193     *
194     * \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
195     *
196     * A rotation vector is a convenient and most compact representation of a rotation matrix (since any
197     * rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
198     * optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
199     *
200     * <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
201     * can be found in:
202     * <ul>
203     *   <li>
204     *      A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
205     *   </li>
206     * </ul>
207     *
208     * <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
209     * <ul>
210     *   <li>
211     *      A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
212     *   </li>
213     *   <li>
214     *      Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
215     *   </li>
216     *   <li>
217     *      A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
218     *   </li>
219     * </ul>
220     */
221    public static void Rodrigues(Mat src, Mat dst, Mat jacobian) {
222        Rodrigues_0(src.nativeObj, dst.nativeObj, jacobian.nativeObj);
223    }
224
225    /**
226     * Converts a rotation matrix to a rotation vector or vice versa.
227     *
228     * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
229     * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
230     * derivatives of the output array components with respect to the input array components.
231     *
232     * \(\begin{array}{l} \theta \leftarrow norm(r) \\ r  \leftarrow r/ \theta \\ R =  \cos(\theta) I + (1- \cos{\theta} ) r r^T +  \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
233     *
234     * Inverse transformation can be also done easily, since
235     *
236     * \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
237     *
238     * A rotation vector is a convenient and most compact representation of a rotation matrix (since any
239     * rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
240     * optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
241     *
242     * <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
243     * can be found in:
244     * <ul>
245     *   <li>
246     *      A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
247     *   </li>
248     * </ul>
249     *
250     * <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
251     * <ul>
252     *   <li>
253     *      A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
254     *   </li>
255     *   <li>
256     *      Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
257     *   </li>
258     *   <li>
259     *      A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
260     *   </li>
261     * </ul>
262     */
263    public static void Rodrigues(Mat src, Mat dst) {
264        Rodrigues_1(src.nativeObj, dst.nativeObj);
265    }
266
267
268    //
269    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
270    //
271
272    /**
273     * Finds a perspective transformation between two planes.
274     *
275     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
276     * or vector&lt;Point2f&gt; .
277     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
278     * a vector&lt;Point2f&gt; .
279     * @param method Method used to compute a homography matrix. The following methods are possible:
280     * <ul>
281     *   <li>
282     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
283     *   </li>
284     *   <li>
285     *    REF: RANSAC - RANSAC-based robust method
286     *   </li>
287     *   <li>
288     *    REF: LMEDS - Least-Median robust method
289     *   </li>
290     *   <li>
291     *    REF: RHO - PROSAC-based robust method
292     *   </li>
293     * </ul>
294     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
295     * (used in the RANSAC and RHO methods only). That is, if
296     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
297     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
298     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
299     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
300     * mask values are ignored.
301     * @param maxIters The maximum number of RANSAC iterations.
302     * @param confidence Confidence level, between 0 and 1.
303     *
304     * The function finds and returns the perspective transformation \(H\) between the source and the
305     * destination planes:
306     *
307     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
308     *
309     * so that the back-projection error
310     *
311     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
312     *
313     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
314     * pairs to compute an initial homography estimate with a simple least-squares scheme.
315     *
316     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
317     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
318     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
319     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
320     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
321     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
322     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
323     * the mask of inliers/outliers.
324     *
325     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
326     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
327     * re-projection error even more.
328     *
329     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
330     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
331     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
332     * noise is rather small, use the default method (method=0).
333     *
334     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
335     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
336     * cannot be estimated, an empty one will be returned.
337     *
338     * SEE:
339     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
340     * perspectiveTransform
341     * @return automatically generated
342     */
343    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters, double confidence) {
344        Mat srcPoints_mat = srcPoints;
345        Mat dstPoints_mat = dstPoints;
346        return new Mat(findHomography_0(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj, maxIters, confidence));
347    }
348
349    /**
350     * Finds a perspective transformation between two planes.
351     *
352     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
353     * or vector&lt;Point2f&gt; .
354     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
355     * a vector&lt;Point2f&gt; .
356     * @param method Method used to compute a homography matrix. The following methods are possible:
357     * <ul>
358     *   <li>
359     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
360     *   </li>
361     *   <li>
362     *    REF: RANSAC - RANSAC-based robust method
363     *   </li>
364     *   <li>
365     *    REF: LMEDS - Least-Median robust method
366     *   </li>
367     *   <li>
368     *    REF: RHO - PROSAC-based robust method
369     *   </li>
370     * </ul>
371     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
372     * (used in the RANSAC and RHO methods only). That is, if
373     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
374     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
375     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
376     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
377     * mask values are ignored.
378     * @param maxIters The maximum number of RANSAC iterations.
379     *
380     * The function finds and returns the perspective transformation \(H\) between the source and the
381     * destination planes:
382     *
383     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
384     *
385     * so that the back-projection error
386     *
387     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
388     *
389     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
390     * pairs to compute an initial homography estimate with a simple least-squares scheme.
391     *
392     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
393     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
394     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
395     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
396     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
397     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
398     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
399     * the mask of inliers/outliers.
400     *
401     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
402     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
403     * re-projection error even more.
404     *
405     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
406     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
407     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
408     * noise is rather small, use the default method (method=0).
409     *
410     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
411     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
412     * cannot be estimated, an empty one will be returned.
413     *
414     * SEE:
415     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
416     * perspectiveTransform
417     * @return automatically generated
418     */
419    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters) {
420        Mat srcPoints_mat = srcPoints;
421        Mat dstPoints_mat = dstPoints;
422        return new Mat(findHomography_1(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj, maxIters));
423    }
424
425    /**
426     * Finds a perspective transformation between two planes.
427     *
428     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
429     * or vector&lt;Point2f&gt; .
430     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
431     * a vector&lt;Point2f&gt; .
432     * @param method Method used to compute a homography matrix. The following methods are possible:
433     * <ul>
434     *   <li>
435     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
436     *   </li>
437     *   <li>
438     *    REF: RANSAC - RANSAC-based robust method
439     *   </li>
440     *   <li>
441     *    REF: LMEDS - Least-Median robust method
442     *   </li>
443     *   <li>
444     *    REF: RHO - PROSAC-based robust method
445     *   </li>
446     * </ul>
447     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
448     * (used in the RANSAC and RHO methods only). That is, if
449     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
450     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
451     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
452     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
453     * mask values are ignored.
454     *
455     * The function finds and returns the perspective transformation \(H\) between the source and the
456     * destination planes:
457     *
458     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
459     *
460     * so that the back-projection error
461     *
462     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
463     *
464     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
465     * pairs to compute an initial homography estimate with a simple least-squares scheme.
466     *
467     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
468     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
469     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
470     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
471     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
472     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
473     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
474     * the mask of inliers/outliers.
475     *
476     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
477     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
478     * re-projection error even more.
479     *
480     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
481     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
482     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
483     * noise is rather small, use the default method (method=0).
484     *
485     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
486     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
487     * cannot be estimated, an empty one will be returned.
488     *
489     * SEE:
490     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
491     * perspectiveTransform
492     * @return automatically generated
493     */
494    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask) {
495        Mat srcPoints_mat = srcPoints;
496        Mat dstPoints_mat = dstPoints;
497        return new Mat(findHomography_2(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj));
498    }
499
500    /**
501     * Finds a perspective transformation between two planes.
502     *
503     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
504     * or vector&lt;Point2f&gt; .
505     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
506     * a vector&lt;Point2f&gt; .
507     * @param method Method used to compute a homography matrix. The following methods are possible:
508     * <ul>
509     *   <li>
510     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
511     *   </li>
512     *   <li>
513     *    REF: RANSAC - RANSAC-based robust method
514     *   </li>
515     *   <li>
516     *    REF: LMEDS - Least-Median robust method
517     *   </li>
518     *   <li>
519     *    REF: RHO - PROSAC-based robust method
520     *   </li>
521     * </ul>
522     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
523     * (used in the RANSAC and RHO methods only). That is, if
524     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
525     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
526     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
527     * mask values are ignored.
528     *
529     * The function finds and returns the perspective transformation \(H\) between the source and the
530     * destination planes:
531     *
532     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
533     *
534     * so that the back-projection error
535     *
536     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
537     *
538     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
539     * pairs to compute an initial homography estimate with a simple least-squares scheme.
540     *
541     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
542     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
543     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
544     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
545     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
546     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
547     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
548     * the mask of inliers/outliers.
549     *
550     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
551     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
552     * re-projection error even more.
553     *
554     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
555     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
556     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
557     * noise is rather small, use the default method (method=0).
558     *
559     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
560     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
561     * cannot be estimated, an empty one will be returned.
562     *
563     * SEE:
564     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
565     * perspectiveTransform
566     * @return automatically generated
567     */
568    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold) {
569        Mat srcPoints_mat = srcPoints;
570        Mat dstPoints_mat = dstPoints;
571        return new Mat(findHomography_3(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold));
572    }
573
574    /**
575     * Finds a perspective transformation between two planes.
576     *
577     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
578     * or vector&lt;Point2f&gt; .
579     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
580     * a vector&lt;Point2f&gt; .
581     * @param method Method used to compute a homography matrix. The following methods are possible:
582     * <ul>
583     *   <li>
584     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
585     *   </li>
586     *   <li>
587     *    REF: RANSAC - RANSAC-based robust method
588     *   </li>
589     *   <li>
590     *    REF: LMEDS - Least-Median robust method
591     *   </li>
592     *   <li>
593     *    REF: RHO - PROSAC-based robust method
594     *   </li>
595     * </ul>
596     * (used in the RANSAC and RHO methods only). That is, if
597     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
598     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
599     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
600     * mask values are ignored.
601     *
602     * The function finds and returns the perspective transformation \(H\) between the source and the
603     * destination planes:
604     *
605     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
606     *
607     * so that the back-projection error
608     *
609     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
610     *
611     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
612     * pairs to compute an initial homography estimate with a simple least-squares scheme.
613     *
614     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
615     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
616     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
617     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
618     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
619     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
620     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
621     * the mask of inliers/outliers.
622     *
623     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
624     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
625     * re-projection error even more.
626     *
627     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
628     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
629     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
630     * noise is rather small, use the default method (method=0).
631     *
632     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
633     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
634     * cannot be estimated, an empty one will be returned.
635     *
636     * SEE:
637     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
638     * perspectiveTransform
639     * @return automatically generated
640     */
641    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method) {
642        Mat srcPoints_mat = srcPoints;
643        Mat dstPoints_mat = dstPoints;
644        return new Mat(findHomography_4(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method));
645    }
646
647    /**
648     * Finds a perspective transformation between two planes.
649     *
650     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
651     * or vector&lt;Point2f&gt; .
652     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
653     * a vector&lt;Point2f&gt; .
654     * <ul>
655     *   <li>
656     *    <b>0</b> - a regular method using all the points, i.e., the least squares method
657     *   </li>
658     *   <li>
659     *    REF: RANSAC - RANSAC-based robust method
660     *   </li>
661     *   <li>
662     *    REF: LMEDS - Least-Median robust method
663     *   </li>
664     *   <li>
665     *    REF: RHO - PROSAC-based robust method
666     *   </li>
667     * </ul>
668     * (used in the RANSAC and RHO methods only). That is, if
669     * \(\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2  &gt;  \texttt{ransacReprojThreshold}\)
670     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
671     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
672     * mask values are ignored.
673     *
674     * The function finds and returns the perspective transformation \(H\) between the source and the
675     * destination planes:
676     *
677     * \(s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\)
678     *
679     * so that the back-projection error
680     *
681     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
682     *
683     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
684     * pairs to compute an initial homography estimate with a simple least-squares scheme.
685     *
686     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
687     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
688     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
689     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
690     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
691     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
692     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
693     * the mask of inliers/outliers.
694     *
695     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
696     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
697     * re-projection error even more.
698     *
699     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
700     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
701     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
702     * noise is rather small, use the default method (method=0).
703     *
704     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
705     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
706     * cannot be estimated, an empty one will be returned.
707     *
708     * SEE:
709     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
710     * perspectiveTransform
711     * @return automatically generated
712     */
713    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints) {
714        Mat srcPoints_mat = srcPoints;
715        Mat dstPoints_mat = dstPoints;
716        return new Mat(findHomography_5(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj));
717    }
718
719
720    //
721    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, Mat& mask, UsacParams params)
722    //
723
724    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, Mat mask, UsacParams params) {
725        Mat srcPoints_mat = srcPoints;
726        Mat dstPoints_mat = dstPoints;
727        return new Mat(findHomography_6(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, mask.nativeObj, params.nativeObj));
728    }
729
730
731    //
732    // C++:  Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
733    //
734
735    /**
736     * Computes an RQ decomposition of 3x3 matrices.
737     *
738     * @param src 3x3 input matrix.
739     * @param mtxR Output 3x3 upper-triangular matrix.
740     * @param mtxQ Output 3x3 orthogonal matrix.
741     * @param Qx Optional output 3x3 rotation matrix around x-axis.
742     * @param Qy Optional output 3x3 rotation matrix around y-axis.
743     * @param Qz Optional output 3x3 rotation matrix around z-axis.
744     *
745     * The function computes a RQ decomposition using the given rotations. This function is used in
746     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
747     * and a rotation matrix.
748     *
749     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
750     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
751     * sequence of rotations about the three principal axes that results in the same orientation of an
752     * object, e.g. see CITE: Slabaugh . Returned tree rotation matrices and corresponding three Euler angles
753     * are only one of the possible solutions.
754     * @return automatically generated
755     */
756    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy, Mat Qz) {
757        return RQDecomp3x3_0(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj, Qz.nativeObj);
758    }
759
760    /**
761     * Computes an RQ decomposition of 3x3 matrices.
762     *
763     * @param src 3x3 input matrix.
764     * @param mtxR Output 3x3 upper-triangular matrix.
765     * @param mtxQ Output 3x3 orthogonal matrix.
766     * @param Qx Optional output 3x3 rotation matrix around x-axis.
767     * @param Qy Optional output 3x3 rotation matrix around y-axis.
768     *
769     * The function computes a RQ decomposition using the given rotations. This function is used in
770     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
771     * and a rotation matrix.
772     *
773     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
774     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
775     * sequence of rotations about the three principal axes that results in the same orientation of an
776     * object, e.g. see CITE: Slabaugh . Returned tree rotation matrices and corresponding three Euler angles
777     * are only one of the possible solutions.
778     * @return automatically generated
779     */
780    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy) {
781        return RQDecomp3x3_1(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj);
782    }
783
784    /**
785     * Computes an RQ decomposition of 3x3 matrices.
786     *
787     * @param src 3x3 input matrix.
788     * @param mtxR Output 3x3 upper-triangular matrix.
789     * @param mtxQ Output 3x3 orthogonal matrix.
790     * @param Qx Optional output 3x3 rotation matrix around x-axis.
791     *
792     * The function computes a RQ decomposition using the given rotations. This function is used in
793     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
794     * and a rotation matrix.
795     *
796     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
797     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
798     * sequence of rotations about the three principal axes that results in the same orientation of an
799     * object, e.g. see CITE: Slabaugh . Returned tree rotation matrices and corresponding three Euler angles
800     * are only one of the possible solutions.
801     * @return automatically generated
802     */
803    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx) {
804        return RQDecomp3x3_2(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj);
805    }
806
807    /**
808     * Computes an RQ decomposition of 3x3 matrices.
809     *
810     * @param src 3x3 input matrix.
811     * @param mtxR Output 3x3 upper-triangular matrix.
812     * @param mtxQ Output 3x3 orthogonal matrix.
813     *
814     * The function computes a RQ decomposition using the given rotations. This function is used in
815     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
816     * and a rotation matrix.
817     *
818     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
819     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
820     * sequence of rotations about the three principal axes that results in the same orientation of an
821     * object, e.g. see CITE: Slabaugh . Returned tree rotation matrices and corresponding three Euler angles
822     * are only one of the possible solutions.
823     * @return automatically generated
824     */
825    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ) {
826        return RQDecomp3x3_3(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj);
827    }
828
829
830    //
831    // C++:  void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
832    //
833
834    /**
835     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
836     *
837     * @param projMatrix 3x4 input projection matrix P.
838     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
839     * @param rotMatrix Output 3x3 external rotation matrix R.
840     * @param transVect Output 4x1 translation vector T.
841     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
842     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
843     * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
844     * @param eulerAngles Optional three-element vector containing three Euler angles of rotation in
845     * degrees.
846     *
847     * The function computes a decomposition of a projection matrix into a calibration and a rotation
848     * matrix and the position of a camera.
849     *
850     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
851     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
852     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
853     * tree rotation matrices and corresponding three Euler angles are only one of the possible solutions.
854     *
855     * The function is based on #RQDecomp3x3 .
856     */
857    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ, Mat eulerAngles) {
858        decomposeProjectionMatrix_0(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj, eulerAngles.nativeObj);
859    }
860
861    /**
862     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
863     *
864     * @param projMatrix 3x4 input projection matrix P.
865     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
866     * @param rotMatrix Output 3x3 external rotation matrix R.
867     * @param transVect Output 4x1 translation vector T.
868     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
869     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
870     * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
871     * degrees.
872     *
873     * The function computes a decomposition of a projection matrix into a calibration and a rotation
874     * matrix and the position of a camera.
875     *
876     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
877     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
878     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
879     * tree rotation matrices and corresponding three Euler angles are only one of the possible solutions.
880     *
881     * The function is based on #RQDecomp3x3 .
882     */
883    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ) {
884        decomposeProjectionMatrix_1(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj);
885    }
886
887    /**
888     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
889     *
890     * @param projMatrix 3x4 input projection matrix P.
891     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
892     * @param rotMatrix Output 3x3 external rotation matrix R.
893     * @param transVect Output 4x1 translation vector T.
894     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
895     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
896     * degrees.
897     *
898     * The function computes a decomposition of a projection matrix into a calibration and a rotation
899     * matrix and the position of a camera.
900     *
901     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
902     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
903     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
904     * tree rotation matrices and corresponding three Euler angles are only one of the possible solutions.
905     *
906     * The function is based on #RQDecomp3x3 .
907     */
908    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY) {
909        decomposeProjectionMatrix_2(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj);
910    }
911
912    /**
913     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
914     *
915     * @param projMatrix 3x4 input projection matrix P.
916     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
917     * @param rotMatrix Output 3x3 external rotation matrix R.
918     * @param transVect Output 4x1 translation vector T.
919     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
920     * degrees.
921     *
922     * The function computes a decomposition of a projection matrix into a calibration and a rotation
923     * matrix and the position of a camera.
924     *
925     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
926     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
927     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
928     * tree rotation matrices and corresponding three Euler angles are only one of the possible solutions.
929     *
930     * The function is based on #RQDecomp3x3 .
931     */
932    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX) {
933        decomposeProjectionMatrix_3(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj);
934    }
935
936    /**
937     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
938     *
939     * @param projMatrix 3x4 input projection matrix P.
940     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
941     * @param rotMatrix Output 3x3 external rotation matrix R.
942     * @param transVect Output 4x1 translation vector T.
943     * degrees.
944     *
945     * The function computes a decomposition of a projection matrix into a calibration and a rotation
946     * matrix and the position of a camera.
947     *
948     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
949     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
950     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
951     * tree rotation matrices and corresponding three Euler angles are only one of the possible solutions.
952     *
953     * The function is based on #RQDecomp3x3 .
954     */
955    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect) {
956        decomposeProjectionMatrix_4(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj);
957    }
958
959
960    //
961    // C++:  void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
962    //
963
964    /**
965     * Computes partial derivatives of the matrix product for each multiplied matrix.
966     *
967     * @param A First multiplied matrix.
968     * @param B Second multiplied matrix.
969     * @param dABdA First output derivative matrix d(A\*B)/dA of size
970     * \(\texttt{A.rows*B.cols} \times {A.rows*A.cols}\) .
971     * @param dABdB Second output derivative matrix d(A\*B)/dB of size
972     * \(\texttt{A.rows*B.cols} \times {B.rows*B.cols}\) .
973     *
974     * The function computes partial derivatives of the elements of the matrix product \(A*B\) with regard to
975     * the elements of each of the two input matrices. The function is used to compute the Jacobian
976     * matrices in #stereoCalibrate but can also be used in any other similar optimization function.
977     */
978    public static void matMulDeriv(Mat A, Mat B, Mat dABdA, Mat dABdB) {
979        matMulDeriv_0(A.nativeObj, B.nativeObj, dABdA.nativeObj, dABdB.nativeObj);
980    }
981
982
983    //
984    // C++:  void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
985    //
986
987    /**
988     * Combines two rotation-and-shift transformations.
989     *
990     * @param rvec1 First rotation vector.
991     * @param tvec1 First translation vector.
992     * @param rvec2 Second rotation vector.
993     * @param tvec2 Second translation vector.
994     * @param rvec3 Output rotation vector of the superposition.
995     * @param tvec3 Output translation vector of the superposition.
996     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
997     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
998     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
999     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1000     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1001     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
1002     * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
1003     * @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
1004     *
1005     * The functions compute:
1006     *
1007     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1008     *
1009     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1010     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1011     *
1012     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1013     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1014     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1015     * function that contains a matrix multiplication.
1016     */
1017    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2, Mat dt3dt2) {
1018        composeRT_0(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj, dt3dt2.nativeObj);
1019    }
1020
1021    /**
1022     * Combines two rotation-and-shift transformations.
1023     *
1024     * @param rvec1 First rotation vector.
1025     * @param tvec1 First translation vector.
1026     * @param rvec2 Second rotation vector.
1027     * @param tvec2 Second translation vector.
1028     * @param rvec3 Output rotation vector of the superposition.
1029     * @param tvec3 Output translation vector of the superposition.
1030     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1031     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1032     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1033     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1034     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1035     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
1036     * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
1037     *
1038     * The functions compute:
1039     *
1040     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1041     *
1042     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1043     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1044     *
1045     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1046     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1047     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1048     * function that contains a matrix multiplication.
1049     */
1050    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2) {
1051        composeRT_1(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj);
1052    }
1053
1054    /**
1055     * Combines two rotation-and-shift transformations.
1056     *
1057     * @param rvec1 First rotation vector.
1058     * @param tvec1 First translation vector.
1059     * @param rvec2 Second rotation vector.
1060     * @param tvec2 Second translation vector.
1061     * @param rvec3 Output rotation vector of the superposition.
1062     * @param tvec3 Output translation vector of the superposition.
1063     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1064     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1065     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1066     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1067     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1068     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
1069     *
1070     * The functions compute:
1071     *
1072     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1073     *
1074     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1075     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1076     *
1077     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1078     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1079     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1080     * function that contains a matrix multiplication.
1081     */
1082    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1) {
1083        composeRT_2(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj);
1084    }
1085
1086    /**
1087     * Combines two rotation-and-shift transformations.
1088     *
1089     * @param rvec1 First rotation vector.
1090     * @param tvec1 First translation vector.
1091     * @param rvec2 Second rotation vector.
1092     * @param tvec2 Second translation vector.
1093     * @param rvec3 Output rotation vector of the superposition.
1094     * @param tvec3 Output translation vector of the superposition.
1095     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1096     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1097     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1098     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1099     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
1100     *
1101     * The functions compute:
1102     *
1103     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1104     *
1105     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1106     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1107     *
1108     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1109     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1110     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1111     * function that contains a matrix multiplication.
1112     */
1113    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1) {
1114        composeRT_3(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj);
1115    }
1116
1117    /**
1118     * Combines two rotation-and-shift transformations.
1119     *
1120     * @param rvec1 First rotation vector.
1121     * @param tvec1 First translation vector.
1122     * @param rvec2 Second rotation vector.
1123     * @param tvec2 Second translation vector.
1124     * @param rvec3 Output rotation vector of the superposition.
1125     * @param tvec3 Output translation vector of the superposition.
1126     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1127     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1128     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1129     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
1130     *
1131     * The functions compute:
1132     *
1133     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1134     *
1135     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1136     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1137     *
1138     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1139     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1140     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1141     * function that contains a matrix multiplication.
1142     */
1143    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2) {
1144        composeRT_4(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj);
1145    }
1146
1147    /**
1148     * Combines two rotation-and-shift transformations.
1149     *
1150     * @param rvec1 First rotation vector.
1151     * @param tvec1 First translation vector.
1152     * @param rvec2 Second rotation vector.
1153     * @param tvec2 Second translation vector.
1154     * @param rvec3 Output rotation vector of the superposition.
1155     * @param tvec3 Output translation vector of the superposition.
1156     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1157     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1158     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
1159     *
1160     * The functions compute:
1161     *
1162     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1163     *
1164     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1165     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1166     *
1167     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1168     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1169     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1170     * function that contains a matrix multiplication.
1171     */
1172    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2) {
1173        composeRT_5(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj);
1174    }
1175
1176    /**
1177     * Combines two rotation-and-shift transformations.
1178     *
1179     * @param rvec1 First rotation vector.
1180     * @param tvec1 First translation vector.
1181     * @param rvec2 Second rotation vector.
1182     * @param tvec2 Second translation vector.
1183     * @param rvec3 Output rotation vector of the superposition.
1184     * @param tvec3 Output translation vector of the superposition.
1185     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1186     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
1187     *
1188     * The functions compute:
1189     *
1190     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1191     *
1192     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1193     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1194     *
1195     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1196     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1197     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1198     * function that contains a matrix multiplication.
1199     */
1200    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1) {
1201        composeRT_6(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj);
1202    }
1203
1204    /**
1205     * Combines two rotation-and-shift transformations.
1206     *
1207     * @param rvec1 First rotation vector.
1208     * @param tvec1 First translation vector.
1209     * @param rvec2 Second rotation vector.
1210     * @param tvec2 Second translation vector.
1211     * @param rvec3 Output rotation vector of the superposition.
1212     * @param tvec3 Output translation vector of the superposition.
1213     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
1214     *
1215     * The functions compute:
1216     *
1217     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1218     *
1219     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1220     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1221     *
1222     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1223     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1224     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1225     * function that contains a matrix multiplication.
1226     */
1227    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1) {
1228        composeRT_7(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj);
1229    }
1230
1231    /**
1232     * Combines two rotation-and-shift transformations.
1233     *
1234     * @param rvec1 First rotation vector.
1235     * @param tvec1 First translation vector.
1236     * @param rvec2 Second rotation vector.
1237     * @param tvec2 Second translation vector.
1238     * @param rvec3 Output rotation vector of the superposition.
1239     * @param tvec3 Output translation vector of the superposition.
1240     *
1241     * The functions compute:
1242     *
1243     * \(\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\)
1244     *
1245     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
1246     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See #Rodrigues for details.
1247     *
1248     * Also, the functions can compute the derivatives of the output vectors with regards to the input
1249     * vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
1250     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
1251     * function that contains a matrix multiplication.
1252     */
1253    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3) {
1254        composeRT_8(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj);
1255    }
1256
1257
1258    //
1259    // C++:  void cv::projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
1260    //
1261
1262    /**
1263     * Projects 3D points to an image plane.
1264     *
1265     * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
1266     * 1-channel or 1xN/Nx1 3-channel (or vector&lt;Point3f&gt; ), where N is the number of points in the view.
1267     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
1268     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
1269     * @param tvec The translation vector, see parameter description above.
1270     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
1271     * @param distCoeffs Input vector of distortion coefficients
1272     * \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.
1273     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
1274     * vector&lt;Point2f&gt; .
1275     * @param jacobian Optional output 2Nx(10+&lt;numDistCoeffs&gt;) jacobian matrix of derivatives of image
1276     * points with respect to components of the rotation vector, translation vector, focal lengths,
1277     * coordinates of the principal point and the distortion coefficients. In the old interface different
1278     * components of the jacobian are returned via different output parameters.
1279     * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
1280     * function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
1281     * jacobian matrix.
1282     *
1283     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
1284     * extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
1285     * derivatives of image points coordinates (as functions of all the input parameters) with respect to
1286     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
1287     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
1288     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
1289     * parameters.
1290     *
1291     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
1292     * or by passing zero distortion coefficients, one can get various useful partial cases of the
1293     * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
1294     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
1295     */
1296    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian, double aspectRatio) {
1297        Mat objectPoints_mat = objectPoints;
1298        Mat distCoeffs_mat = distCoeffs;
1299        Mat imagePoints_mat = imagePoints;
1300        projectPoints_0(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj, aspectRatio);
1301    }
1302
1303    /**
1304     * Projects 3D points to an image plane.
1305     *
1306     * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
1307     * 1-channel or 1xN/Nx1 3-channel (or vector&lt;Point3f&gt; ), where N is the number of points in the view.
1308     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
1309     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
1310     * @param tvec The translation vector, see parameter description above.
1311     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
1312     * @param distCoeffs Input vector of distortion coefficients
1313     * \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.
1314     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
1315     * vector&lt;Point2f&gt; .
1316     * @param jacobian Optional output 2Nx(10+&lt;numDistCoeffs&gt;) jacobian matrix of derivatives of image
1317     * points with respect to components of the rotation vector, translation vector, focal lengths,
1318     * coordinates of the principal point and the distortion coefficients. In the old interface different
1319     * components of the jacobian are returned via different output parameters.
1320     * function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
1321     * jacobian matrix.
1322     *
1323     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
1324     * extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
1325     * derivatives of image points coordinates (as functions of all the input parameters) with respect to
1326     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
1327     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
1328     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
1329     * parameters.
1330     *
1331     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
1332     * or by passing zero distortion coefficients, one can get various useful partial cases of the
1333     * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
1334     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
1335     */
1336    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian) {
1337        Mat objectPoints_mat = objectPoints;
1338        Mat distCoeffs_mat = distCoeffs;
1339        Mat imagePoints_mat = imagePoints;
1340        projectPoints_1(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj);
1341    }
1342
1343    /**
1344     * Projects 3D points to an image plane.
1345     *
1346     * @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
1347     * 1-channel or 1xN/Nx1 3-channel (or vector&lt;Point3f&gt; ), where N is the number of points in the view.
1348     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
1349     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
1350     * @param tvec The translation vector, see parameter description above.
1351     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
1352     * @param distCoeffs Input vector of distortion coefficients
1353     * \(\distcoeffs\) . If the vector is empty, the zero distortion coefficients are assumed.
1354     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
1355     * vector&lt;Point2f&gt; .
1356     * points with respect to components of the rotation vector, translation vector, focal lengths,
1357     * coordinates of the principal point and the distortion coefficients. In the old interface different
1358     * components of the jacobian are returned via different output parameters.
1359     * function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
1360     * jacobian matrix.
1361     *
1362     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
1363     * extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
1364     * derivatives of image points coordinates (as functions of all the input parameters) with respect to
1365     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
1366     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
1367     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
1368     * parameters.
1369     *
1370     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
1371     * or by passing zero distortion coefficients, one can get various useful partial cases of the
1372     * function. This means, one can compute the distorted coordinates for a sparse set of points or apply
1373     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
1374     */
1375    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints) {
1376        Mat objectPoints_mat = objectPoints;
1377        Mat distCoeffs_mat = distCoeffs;
1378        Mat imagePoints_mat = imagePoints;
1379        projectPoints_2(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj);
1380    }
1381
1382
1383    //
1384    // C++:  bool cv::solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
1385    //
1386
1387    /**
1388     * Finds an object pose from 3D-2D point correspondences.
1389     *
1390     * SEE: REF: calib3d_solvePnP
1391     *
1392     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
1393     * coordinate frame to the camera coordinate frame, using different methods:
1394     * <ul>
1395     *   <li>
1396     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
1397     *   </li>
1398     *   <li>
1399     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar.
1400     *   </li>
1401     *   <li>
1402     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
1403     * Number of input points must be 4. Object points must be defined in the following order:
1404     *   <ul>
1405     *     <li>
1406     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1407     *     </li>
1408     *     <li>
1409     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1410     *     </li>
1411     *     <li>
1412     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1413     *     </li>
1414     *     <li>
1415     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1416     *     </li>
1417     *   </ul>
1418     *   <li>
1419     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
1420     *   </li>
1421     * </ul>
1422     *
1423     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1424     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1425     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1426     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1427     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1428     * @param distCoeffs Input vector of distortion coefficients
1429     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1430     * assumed.
1431     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1432     * the model coordinate system to the camera coordinate system.
1433     * @param tvec Output translation vector.
1434     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
1435     * the provided rvec and tvec values as initial approximations of the rotation and translation
1436     * vectors, respectively, and further optimizes them.
1437     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
1438     *
1439     * More information about Perspective-n-Points is described in REF: calib3d_solvePnP
1440     *
1441     * <b>Note:</b>
1442     * <ul>
1443     *   <li>
1444     *       An example of how to use solvePnP for planar augmented reality can be found at
1445     *         opencv_source_code/samples/python/plane_ar.py
1446     *   </li>
1447     *   <li>
1448     *       If you are using Python:
1449     *   <ul>
1450     *     <li>
1451     *          Numpy array slices won't work as input because solvePnP requires contiguous
1452     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
1453     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
1454     *     </li>
1455     *     <li>
1456     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
1457     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
1458     *         which requires 2-channel information.
1459     *     </li>
1460     *     <li>
1461     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
1462     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
1463     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
1464     *     </li>
1465     *   </ul>
1466     *   <li>
1467     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
1468     *        unstable and sometimes give completely wrong results. If you pass one of these two
1469     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
1470     *   </li>
1471     *   <li>
1472     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
1473     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
1474     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
1475     *   </li>
1476     *   <li>
1477     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
1478     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
1479     *        global solution to converge.
1480     *   </li>
1481     *   <li>
1482     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
1483     *   </li>
1484     *   <li>
1485     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
1486     *        Number of input points must be 4. Object points must be defined in the following order:
1487     *   <ul>
1488     *     <li>
1489     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
1490     *     </li>
1491     *     <li>
1492     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
1493     *     </li>
1494     *     <li>
1495     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
1496     *     </li>
1497     *     <li>
1498     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
1499     *     </li>
1500     *   </ul>
1501     *   <ul>
1502     *     <li>
1503     *       With REF: SOLVEPNP_SQPNP input points must be &gt;= 3
1504     *     </li>
1505     *   </ul>
1506     *   </li>
1507     * </ul>
1508     * @return automatically generated
1509     */
1510    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int flags) {
1511        Mat objectPoints_mat = objectPoints;
1512        Mat imagePoints_mat = imagePoints;
1513        Mat distCoeffs_mat = distCoeffs;
1514        return solvePnP_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, flags);
1515    }
1516
1517    /**
1518     * Finds an object pose from 3D-2D point correspondences.
1519     *
1520     * SEE: REF: calib3d_solvePnP
1521     *
1522     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
1523     * coordinate frame to the camera coordinate frame, using different methods:
1524     * <ul>
1525     *   <li>
1526     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
1527     *   </li>
1528     *   <li>
1529     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar.
1530     *   </li>
1531     *   <li>
1532     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
1533     * Number of input points must be 4. Object points must be defined in the following order:
1534     *   <ul>
1535     *     <li>
1536     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1537     *     </li>
1538     *     <li>
1539     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1540     *     </li>
1541     *     <li>
1542     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1543     *     </li>
1544     *     <li>
1545     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1546     *     </li>
1547     *   </ul>
1548     *   <li>
1549     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
1550     *   </li>
1551     * </ul>
1552     *
1553     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1554     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1555     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1556     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1557     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1558     * @param distCoeffs Input vector of distortion coefficients
1559     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1560     * assumed.
1561     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1562     * the model coordinate system to the camera coordinate system.
1563     * @param tvec Output translation vector.
1564     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
1565     * the provided rvec and tvec values as initial approximations of the rotation and translation
1566     * vectors, respectively, and further optimizes them.
1567     *
1568     * More information about Perspective-n-Points is described in REF: calib3d_solvePnP
1569     *
1570     * <b>Note:</b>
1571     * <ul>
1572     *   <li>
1573     *       An example of how to use solvePnP for planar augmented reality can be found at
1574     *         opencv_source_code/samples/python/plane_ar.py
1575     *   </li>
1576     *   <li>
1577     *       If you are using Python:
1578     *   <ul>
1579     *     <li>
1580     *          Numpy array slices won't work as input because solvePnP requires contiguous
1581     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
1582     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
1583     *     </li>
1584     *     <li>
1585     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
1586     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
1587     *         which requires 2-channel information.
1588     *     </li>
1589     *     <li>
1590     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
1591     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
1592     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
1593     *     </li>
1594     *   </ul>
1595     *   <li>
1596     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
1597     *        unstable and sometimes give completely wrong results. If you pass one of these two
1598     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
1599     *   </li>
1600     *   <li>
1601     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
1602     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
1603     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
1604     *   </li>
1605     *   <li>
1606     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
1607     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
1608     *        global solution to converge.
1609     *   </li>
1610     *   <li>
1611     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
1612     *   </li>
1613     *   <li>
1614     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
1615     *        Number of input points must be 4. Object points must be defined in the following order:
1616     *   <ul>
1617     *     <li>
1618     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
1619     *     </li>
1620     *     <li>
1621     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
1622     *     </li>
1623     *     <li>
1624     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
1625     *     </li>
1626     *     <li>
1627     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
1628     *     </li>
1629     *   </ul>
1630     *   <ul>
1631     *     <li>
1632     *       With REF: SOLVEPNP_SQPNP input points must be &gt;= 3
1633     *     </li>
1634     *   </ul>
1635     *   </li>
1636     * </ul>
1637     * @return automatically generated
1638     */
1639    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess) {
1640        Mat objectPoints_mat = objectPoints;
1641        Mat imagePoints_mat = imagePoints;
1642        Mat distCoeffs_mat = distCoeffs;
1643        return solvePnP_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess);
1644    }
1645
1646    /**
1647     * Finds an object pose from 3D-2D point correspondences.
1648     *
1649     * SEE: REF: calib3d_solvePnP
1650     *
1651     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
1652     * coordinate frame to the camera coordinate frame, using different methods:
1653     * <ul>
1654     *   <li>
1655     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
1656     *   </li>
1657     *   <li>
1658     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar.
1659     *   </li>
1660     *   <li>
1661     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
1662     * Number of input points must be 4. Object points must be defined in the following order:
1663     *   <ul>
1664     *     <li>
1665     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
1666     *     </li>
1667     *     <li>
1668     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
1669     *     </li>
1670     *     <li>
1671     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
1672     *     </li>
1673     *     <li>
1674     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
1675     *     </li>
1676     *   </ul>
1677     *   <li>
1678     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
1679     *   </li>
1680     * </ul>
1681     *
1682     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1683     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1684     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1685     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1686     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1687     * @param distCoeffs Input vector of distortion coefficients
1688     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1689     * assumed.
1690     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1691     * the model coordinate system to the camera coordinate system.
1692     * @param tvec Output translation vector.
1693     * the provided rvec and tvec values as initial approximations of the rotation and translation
1694     * vectors, respectively, and further optimizes them.
1695     *
1696     * More information about Perspective-n-Points is described in REF: calib3d_solvePnP
1697     *
1698     * <b>Note:</b>
1699     * <ul>
1700     *   <li>
1701     *       An example of how to use solvePnP for planar augmented reality can be found at
1702     *         opencv_source_code/samples/python/plane_ar.py
1703     *   </li>
1704     *   <li>
1705     *       If you are using Python:
1706     *   <ul>
1707     *     <li>
1708     *          Numpy array slices won't work as input because solvePnP requires contiguous
1709     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
1710     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
1711     *     </li>
1712     *     <li>
1713     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
1714     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
1715     *         which requires 2-channel information.
1716     *     </li>
1717     *     <li>
1718     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
1719     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
1720     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
1721     *     </li>
1722     *   </ul>
1723     *   <li>
1724     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
1725     *        unstable and sometimes give completely wrong results. If you pass one of these two
1726     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
1727     *   </li>
1728     *   <li>
1729     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
1730     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
1731     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
1732     *   </li>
1733     *   <li>
1734     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
1735     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
1736     *        global solution to converge.
1737     *   </li>
1738     *   <li>
1739     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
1740     *   </li>
1741     *   <li>
1742     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
1743     *        Number of input points must be 4. Object points must be defined in the following order:
1744     *   <ul>
1745     *     <li>
1746     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
1747     *     </li>
1748     *     <li>
1749     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
1750     *     </li>
1751     *     <li>
1752     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
1753     *     </li>
1754     *     <li>
1755     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
1756     *     </li>
1757     *   </ul>
1758     *   <ul>
1759     *     <li>
1760     *       With REF: SOLVEPNP_SQPNP input points must be &gt;= 3
1761     *     </li>
1762     *   </ul>
1763     *   </li>
1764     * </ul>
1765     * @return automatically generated
1766     */
1767    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) {
1768        Mat objectPoints_mat = objectPoints;
1769        Mat imagePoints_mat = imagePoints;
1770        Mat distCoeffs_mat = distCoeffs;
1771        return solvePnP_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj);
1772    }
1773
1774
1775    //
1776    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
1777    //
1778
1779    /**
1780     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
1781     *
1782     * SEE: REF: calib3d_solvePnP
1783     *
1784     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1785     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1786     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1787     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1788     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1789     * @param distCoeffs Input vector of distortion coefficients
1790     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1791     * assumed.
1792     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1793     * the model coordinate system to the camera coordinate system.
1794     * @param tvec Output translation vector.
1795     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
1796     * the provided rvec and tvec values as initial approximations of the rotation and translation
1797     * vectors, respectively, and further optimizes them.
1798     * @param iterationsCount Number of iterations.
1799     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
1800     * is the maximum allowed distance between the observed and computed point projections to consider it
1801     * an inlier.
1802     * @param confidence The probability that the algorithm produces a useful result.
1803     * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
1804     * @param flags Method for solving a PnP problem (see REF: solvePnP ).
1805     *
1806     * The function estimates an object pose given a set of object points, their corresponding image
1807     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
1808     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
1809     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
1810     * makes the function resistant to outliers.
1811     *
1812     * <b>Note:</b>
1813     * <ul>
1814     *   <li>
1815     *       An example of how to use solvePNPRansac for object detection can be found at
1816     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
1817     *   </li>
1818     *   <li>
1819     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
1820     *        is #SOLVEPNP_EPNP. Exceptions are:
1821     *   <ul>
1822     *     <li>
1823     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
1824     *     </li>
1825     *     <li>
1826     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
1827     *     </li>
1828     *   </ul>
1829     *   <li>
1830     *       The method used to estimate the camera pose using all the inliers is defined by the
1831     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
1832     *        the method #SOLVEPNP_EPNP will be used instead.
1833     *   </li>
1834     * </ul>
1835     * @return automatically generated
1836     */
1837    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers, int flags) {
1838        Mat objectPoints_mat = objectPoints;
1839        Mat imagePoints_mat = imagePoints;
1840        Mat distCoeffs_mat = distCoeffs;
1841        return solvePnPRansac_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers.nativeObj, flags);
1842    }
1843
1844    /**
1845     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
1846     *
1847     * SEE: REF: calib3d_solvePnP
1848     *
1849     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1850     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1851     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1852     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1853     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1854     * @param distCoeffs Input vector of distortion coefficients
1855     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1856     * assumed.
1857     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1858     * the model coordinate system to the camera coordinate system.
1859     * @param tvec Output translation vector.
1860     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
1861     * the provided rvec and tvec values as initial approximations of the rotation and translation
1862     * vectors, respectively, and further optimizes them.
1863     * @param iterationsCount Number of iterations.
1864     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
1865     * is the maximum allowed distance between the observed and computed point projections to consider it
1866     * an inlier.
1867     * @param confidence The probability that the algorithm produces a useful result.
1868     * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
1869     *
1870     * The function estimates an object pose given a set of object points, their corresponding image
1871     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
1872     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
1873     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
1874     * makes the function resistant to outliers.
1875     *
1876     * <b>Note:</b>
1877     * <ul>
1878     *   <li>
1879     *       An example of how to use solvePNPRansac for object detection can be found at
1880     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
1881     *   </li>
1882     *   <li>
1883     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
1884     *        is #SOLVEPNP_EPNP. Exceptions are:
1885     *   <ul>
1886     *     <li>
1887     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
1888     *     </li>
1889     *     <li>
1890     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
1891     *     </li>
1892     *   </ul>
1893     *   <li>
1894     *       The method used to estimate the camera pose using all the inliers is defined by the
1895     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
1896     *        the method #SOLVEPNP_EPNP will be used instead.
1897     *   </li>
1898     * </ul>
1899     * @return automatically generated
1900     */
1901    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers) {
1902        Mat objectPoints_mat = objectPoints;
1903        Mat imagePoints_mat = imagePoints;
1904        Mat distCoeffs_mat = distCoeffs;
1905        return solvePnPRansac_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers.nativeObj);
1906    }
1907
1908    /**
1909     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
1910     *
1911     * SEE: REF: calib3d_solvePnP
1912     *
1913     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1914     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1915     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1916     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1917     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1918     * @param distCoeffs Input vector of distortion coefficients
1919     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1920     * assumed.
1921     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1922     * the model coordinate system to the camera coordinate system.
1923     * @param tvec Output translation vector.
1924     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
1925     * the provided rvec and tvec values as initial approximations of the rotation and translation
1926     * vectors, respectively, and further optimizes them.
1927     * @param iterationsCount Number of iterations.
1928     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
1929     * is the maximum allowed distance between the observed and computed point projections to consider it
1930     * an inlier.
1931     * @param confidence The probability that the algorithm produces a useful result.
1932     *
1933     * The function estimates an object pose given a set of object points, their corresponding image
1934     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
1935     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
1936     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
1937     * makes the function resistant to outliers.
1938     *
1939     * <b>Note:</b>
1940     * <ul>
1941     *   <li>
1942     *       An example of how to use solvePNPRansac for object detection can be found at
1943     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
1944     *   </li>
1945     *   <li>
1946     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
1947     *        is #SOLVEPNP_EPNP. Exceptions are:
1948     *   <ul>
1949     *     <li>
1950     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
1951     *     </li>
1952     *     <li>
1953     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
1954     *     </li>
1955     *   </ul>
1956     *   <li>
1957     *       The method used to estimate the camera pose using all the inliers is defined by the
1958     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
1959     *        the method #SOLVEPNP_EPNP will be used instead.
1960     *   </li>
1961     * </ul>
1962     * @return automatically generated
1963     */
1964    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence) {
1965        Mat objectPoints_mat = objectPoints;
1966        Mat imagePoints_mat = imagePoints;
1967        Mat distCoeffs_mat = distCoeffs;
1968        return solvePnPRansac_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence);
1969    }
1970
1971    /**
1972     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
1973     *
1974     * SEE: REF: calib3d_solvePnP
1975     *
1976     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1977     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
1978     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1979     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
1980     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
1981     * @param distCoeffs Input vector of distortion coefficients
1982     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
1983     * assumed.
1984     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
1985     * the model coordinate system to the camera coordinate system.
1986     * @param tvec Output translation vector.
1987     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
1988     * the provided rvec and tvec values as initial approximations of the rotation and translation
1989     * vectors, respectively, and further optimizes them.
1990     * @param iterationsCount Number of iterations.
1991     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
1992     * is the maximum allowed distance between the observed and computed point projections to consider it
1993     * an inlier.
1994     *
1995     * The function estimates an object pose given a set of object points, their corresponding image
1996     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
1997     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
1998     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
1999     * makes the function resistant to outliers.
2000     *
2001     * <b>Note:</b>
2002     * <ul>
2003     *   <li>
2004     *       An example of how to use solvePNPRansac for object detection can be found at
2005     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2006     *   </li>
2007     *   <li>
2008     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2009     *        is #SOLVEPNP_EPNP. Exceptions are:
2010     *   <ul>
2011     *     <li>
2012     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2013     *     </li>
2014     *     <li>
2015     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2016     *     </li>
2017     *   </ul>
2018     *   <li>
2019     *       The method used to estimate the camera pose using all the inliers is defined by the
2020     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2021     *        the method #SOLVEPNP_EPNP will be used instead.
2022     *   </li>
2023     * </ul>
2024     * @return automatically generated
2025     */
2026    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError) {
2027        Mat objectPoints_mat = objectPoints;
2028        Mat imagePoints_mat = imagePoints;
2029        Mat distCoeffs_mat = distCoeffs;
2030        return solvePnPRansac_3(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError);
2031    }
2032
2033    /**
2034     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2035     *
2036     * SEE: REF: calib3d_solvePnP
2037     *
2038     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2039     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2040     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2041     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2042     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2043     * @param distCoeffs Input vector of distortion coefficients
2044     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2045     * assumed.
2046     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2047     * the model coordinate system to the camera coordinate system.
2048     * @param tvec Output translation vector.
2049     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2050     * the provided rvec and tvec values as initial approximations of the rotation and translation
2051     * vectors, respectively, and further optimizes them.
2052     * @param iterationsCount Number of iterations.
2053     * is the maximum allowed distance between the observed and computed point projections to consider it
2054     * an inlier.
2055     *
2056     * The function estimates an object pose given a set of object points, their corresponding image
2057     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2058     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2059     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2060     * makes the function resistant to outliers.
2061     *
2062     * <b>Note:</b>
2063     * <ul>
2064     *   <li>
2065     *       An example of how to use solvePNPRansac for object detection can be found at
2066     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2067     *   </li>
2068     *   <li>
2069     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2070     *        is #SOLVEPNP_EPNP. Exceptions are:
2071     *   <ul>
2072     *     <li>
2073     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2074     *     </li>
2075     *     <li>
2076     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2077     *     </li>
2078     *   </ul>
2079     *   <li>
2080     *       The method used to estimate the camera pose using all the inliers is defined by the
2081     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2082     *        the method #SOLVEPNP_EPNP will be used instead.
2083     *   </li>
2084     * </ul>
2085     * @return automatically generated
2086     */
2087    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount) {
2088        Mat objectPoints_mat = objectPoints;
2089        Mat imagePoints_mat = imagePoints;
2090        Mat distCoeffs_mat = distCoeffs;
2091        return solvePnPRansac_4(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount);
2092    }
2093
2094    /**
2095     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2096     *
2097     * SEE: REF: calib3d_solvePnP
2098     *
2099     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2100     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2101     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2102     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2103     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2104     * @param distCoeffs Input vector of distortion coefficients
2105     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2106     * assumed.
2107     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2108     * the model coordinate system to the camera coordinate system.
2109     * @param tvec Output translation vector.
2110     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true (1), the function uses
2111     * the provided rvec and tvec values as initial approximations of the rotation and translation
2112     * vectors, respectively, and further optimizes them.
2113     * is the maximum allowed distance between the observed and computed point projections to consider it
2114     * an inlier.
2115     *
2116     * The function estimates an object pose given a set of object points, their corresponding image
2117     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2118     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2119     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2120     * makes the function resistant to outliers.
2121     *
2122     * <b>Note:</b>
2123     * <ul>
2124     *   <li>
2125     *       An example of how to use solvePNPRansac for object detection can be found at
2126     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2127     *   </li>
2128     *   <li>
2129     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2130     *        is #SOLVEPNP_EPNP. Exceptions are:
2131     *   <ul>
2132     *     <li>
2133     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2134     *     </li>
2135     *     <li>
2136     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2137     *     </li>
2138     *   </ul>
2139     *   <li>
2140     *       The method used to estimate the camera pose using all the inliers is defined by the
2141     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2142     *        the method #SOLVEPNP_EPNP will be used instead.
2143     *   </li>
2144     * </ul>
2145     * @return automatically generated
2146     */
2147    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess) {
2148        Mat objectPoints_mat = objectPoints;
2149        Mat imagePoints_mat = imagePoints;
2150        Mat distCoeffs_mat = distCoeffs;
2151        return solvePnPRansac_5(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess);
2152    }
2153
2154    /**
2155     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
2156     *
2157     * SEE: REF: calib3d_solvePnP
2158     *
2159     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2160     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2161     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2162     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2163     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2164     * @param distCoeffs Input vector of distortion coefficients
2165     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2166     * assumed.
2167     * @param rvec Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2168     * the model coordinate system to the camera coordinate system.
2169     * @param tvec Output translation vector.
2170     * the provided rvec and tvec values as initial approximations of the rotation and translation
2171     * vectors, respectively, and further optimizes them.
2172     * is the maximum allowed distance between the observed and computed point projections to consider it
2173     * an inlier.
2174     *
2175     * The function estimates an object pose given a set of object points, their corresponding image
2176     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
2177     * a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
2178     * projections imagePoints and the projected (using REF: projectPoints ) objectPoints. The use of RANSAC
2179     * makes the function resistant to outliers.
2180     *
2181     * <b>Note:</b>
2182     * <ul>
2183     *   <li>
2184     *       An example of how to use solvePNPRansac for object detection can be found at
2185     *         opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
2186     *   </li>
2187     *   <li>
2188     *       The default method used to estimate the camera pose for the Minimal Sample Sets step
2189     *        is #SOLVEPNP_EPNP. Exceptions are:
2190     *   <ul>
2191     *     <li>
2192     *           if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
2193     *     </li>
2194     *     <li>
2195     *           if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
2196     *     </li>
2197     *   </ul>
2198     *   <li>
2199     *       The method used to estimate the camera pose using all the inliers is defined by the
2200     *        flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
2201     *        the method #SOLVEPNP_EPNP will be used instead.
2202     *   </li>
2203     * </ul>
2204     * @return automatically generated
2205     */
2206    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) {
2207        Mat objectPoints_mat = objectPoints;
2208        Mat imagePoints_mat = imagePoints;
2209        Mat distCoeffs_mat = distCoeffs;
2210        return solvePnPRansac_6(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj);
2211    }
2212
2213
2214    //
2215    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
2216    //
2217
2218    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, Mat inliers, UsacParams params) {
2219        Mat objectPoints_mat = objectPoints;
2220        Mat imagePoints_mat = imagePoints;
2221        Mat distCoeffs_mat = distCoeffs;
2222        return solvePnPRansac_7(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, inliers.nativeObj, params.nativeObj);
2223    }
2224
2225    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, Mat inliers) {
2226        Mat objectPoints_mat = objectPoints;
2227        Mat imagePoints_mat = imagePoints;
2228        Mat distCoeffs_mat = distCoeffs;
2229        return solvePnPRansac_8(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, inliers.nativeObj);
2230    }
2231
2232
2233    //
2234    // C++:  int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags)
2235    //
2236
2237    /**
2238     * Finds an object pose from 3 3D-2D point correspondences.
2239     *
2240     * SEE: REF: calib3d_solvePnP
2241     *
2242     * @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
2243     * 1x3/3x1 3-channel. vector&lt;Point3f&gt; can be also passed here.
2244     * @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
2245     *  vector&lt;Point2f&gt; can be also passed here.
2246     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2247     * @param distCoeffs Input vector of distortion coefficients
2248     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2249     * assumed.
2250     * @param rvecs Output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
2251     * the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
2252     * @param tvecs Output translation vectors.
2253     * @param flags Method for solving a P3P problem:
2254     * <ul>
2255     *   <li>
2256     *    REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
2257     * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete).
2258     *   </li>
2259     *   <li>
2260     *    REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
2261     * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17).
2262     *   </li>
2263     * </ul>
2264     *
2265     * The function estimates the object pose given 3 object points, their corresponding image
2266     * projections, as well as the camera intrinsic matrix and the distortion coefficients.
2267     *
2268     * <b>Note:</b>
2269     * The solutions are sorted by reprojection errors (lowest to highest).
2270     * @return automatically generated
2271     */
2272    public static int solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags) {
2273        Mat rvecs_mat = new Mat();
2274        Mat tvecs_mat = new Mat();
2275        int retVal = solveP3P_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
2276        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
2277        rvecs_mat.release();
2278        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
2279        tvecs_mat.release();
2280        return retVal;
2281    }
2282
2283
2284    //
2285    // C++:  void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON))
2286    //
2287
2288    /**
2289     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
2290     * to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
2291     *
2292     * SEE: REF: calib3d_solvePnP
2293     *
2294     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2295     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2296     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2297     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2298     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2299     * @param distCoeffs Input vector of distortion coefficients
2300     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2301     * assumed.
2302     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2303     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2304     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
2305     * @param criteria Criteria when to stop the Levenberg-Marquard iterative algorithm.
2306     *
2307     * The function refines the object pose given at least 3 object points, their corresponding image
2308     * projections, an initial solution for the rotation and translation vector,
2309     * as well as the camera intrinsic matrix and the distortion coefficients.
2310     * The function minimizes the projection error with respect to the rotation and the translation vectors, according
2311     * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
2312     */
2313    public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria) {
2314        solvePnPRefineLM_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
2315    }
2316
2317    /**
2318     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
2319     * to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
2320     *
2321     * SEE: REF: calib3d_solvePnP
2322     *
2323     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2324     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2325     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2326     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2327     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2328     * @param distCoeffs Input vector of distortion coefficients
2329     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2330     * assumed.
2331     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2332     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2333     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
2334     *
2335     * The function refines the object pose given at least 3 object points, their corresponding image
2336     * projections, an initial solution for the rotation and translation vector,
2337     * as well as the camera intrinsic matrix and the distortion coefficients.
2338     * The function minimizes the projection error with respect to the rotation and the translation vectors, according
2339     * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
2340     */
2341    public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec) {
2342        solvePnPRefineLM_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj);
2343    }
2344
2345
2346    //
2347    // C++:  void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
2348    //
2349
2350    /**
2351     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
2352     * to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
2353     *
2354     * SEE: REF: calib3d_solvePnP
2355     *
2356     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2357     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2358     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2359     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2360     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2361     * @param distCoeffs Input vector of distortion coefficients
2362     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2363     * assumed.
2364     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2365     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2366     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
2367     * @param criteria Criteria when to stop the Levenberg-Marquard iterative algorithm.
2368     * @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \(\alpha\)
2369     * gain in the Damped Gauss-Newton formulation.
2370     *
2371     * The function refines the object pose given at least 3 object points, their corresponding image
2372     * projections, an initial solution for the rotation and translation vector,
2373     * as well as the camera intrinsic matrix and the distortion coefficients.
2374     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
2375     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
2376     */
2377    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria, double VVSlambda) {
2378        solvePnPRefineVVS_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, VVSlambda);
2379    }
2380
2381    /**
2382     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
2383     * to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
2384     *
2385     * SEE: REF: calib3d_solvePnP
2386     *
2387     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2388     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2389     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2390     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2391     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2392     * @param distCoeffs Input vector of distortion coefficients
2393     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2394     * assumed.
2395     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2396     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2397     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
2398     * @param criteria Criteria when to stop the Levenberg-Marquard iterative algorithm.
2399     * gain in the Damped Gauss-Newton formulation.
2400     *
2401     * The function refines the object pose given at least 3 object points, their corresponding image
2402     * projections, an initial solution for the rotation and translation vector,
2403     * as well as the camera intrinsic matrix and the distortion coefficients.
2404     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
2405     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
2406     */
2407    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria) {
2408        solvePnPRefineVVS_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
2409    }
2410
2411    /**
2412     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
2413     * to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
2414     *
2415     * SEE: REF: calib3d_solvePnP
2416     *
2417     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
2418     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
2419     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2420     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
2421     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2422     * @param distCoeffs Input vector of distortion coefficients
2423     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2424     * assumed.
2425     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
2426     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
2427     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
2428     * gain in the Damped Gauss-Newton formulation.
2429     *
2430     * The function refines the object pose given at least 3 object points, their corresponding image
2431     * projections, an initial solution for the rotation and translation vector,
2432     * as well as the camera intrinsic matrix and the distortion coefficients.
2433     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
2434     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
2435     */
2436    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec) {
2437        solvePnPRefineVVS_2(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj);
2438    }
2439
2440
2441    //
2442    // C++:  int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
2443    //
2444
2445    /**
2446     * Finds an object pose from 3D-2D point correspondences.
2447     *
2448     * SEE: REF: calib3d_solvePnP
2449     *
2450     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
2451     * couple), depending on the number of input points and the chosen method:
2452     * <ul>
2453     *   <li>
2454     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
2455     *   </li>
2456     *   <li>
2457     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
2458     *   </li>
2459     *   <li>
2460     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
2461     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
2462     *   <ul>
2463     *     <li>
2464     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2465     *     </li>
2466     *     <li>
2467     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2468     *     </li>
2469     *     <li>
2470     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2471     *     </li>
2472     *     <li>
2473     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
2474     *     </li>
2475     *   </ul>
2476     *   <li>
2477     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
2478     * Only 1 solution is returned.
2479     *   </li>
2480     * </ul>
2481     *
2482     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2483     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2484     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2485     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2486     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2487     * @param distCoeffs Input vector of distortion coefficients
2488     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2489     * assumed.
2490     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
2491     * the model coordinate system to the camera coordinate system.
2492     * @param tvecs Vector of output translation vectors.
2493     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
2494     * the provided rvec and tvec values as initial approximations of the rotation and translation
2495     * vectors, respectively, and further optimizes them.
2496     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
2497     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
2498     * and useExtrinsicGuess is set to true.
2499     * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
2500     * and useExtrinsicGuess is set to true.
2501     * @param reprojectionError Optional vector of reprojection error, that is the RMS error
2502     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
2503     * and the 3D object points projected with the estimated pose.
2504     *
2505     * More information is described in REF: calib3d_solvePnP
2506     *
2507     * <b>Note:</b>
2508     * <ul>
2509     *   <li>
2510     *       An example of how to use solvePnP for planar augmented reality can be found at
2511     *         opencv_source_code/samples/python/plane_ar.py
2512     *   </li>
2513     *   <li>
2514     *       If you are using Python:
2515     *   <ul>
2516     *     <li>
2517     *          Numpy array slices won't work as input because solvePnP requires contiguous
2518     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
2519     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
2520     *     </li>
2521     *     <li>
2522     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
2523     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
2524     *         which requires 2-channel information.
2525     *     </li>
2526     *     <li>
2527     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
2528     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
2529     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
2530     *     </li>
2531     *   </ul>
2532     *   <li>
2533     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
2534     *        unstable and sometimes give completely wrong results. If you pass one of these two
2535     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
2536     *   </li>
2537     *   <li>
2538     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
2539     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
2540     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
2541     *   </li>
2542     *   <li>
2543     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
2544     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
2545     *        global solution to converge.
2546     *   </li>
2547     *   <li>
2548     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
2549     *   </li>
2550     *   <li>
2551     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
2552     *        Number of input points must be 4. Object points must be defined in the following order:
2553     *   <ul>
2554     *     <li>
2555     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
2556     *     </li>
2557     *     <li>
2558     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
2559     *     </li>
2560     *     <li>
2561     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
2562     *     </li>
2563     *     <li>
2564     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
2565     *     </li>
2566     *   </ul>
2567     *   </li>
2568     * </ul>
2569     * @return automatically generated
2570     */
2571    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec, Mat reprojectionError) {
2572        Mat rvecs_mat = new Mat();
2573        Mat tvecs_mat = new Mat();
2574        int retVal = solvePnPGeneric_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj, tvec.nativeObj, reprojectionError.nativeObj);
2575        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
2576        rvecs_mat.release();
2577        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
2578        tvecs_mat.release();
2579        return retVal;
2580    }
2581
2582    /**
2583     * Finds an object pose from 3D-2D point correspondences.
2584     *
2585     * SEE: REF: calib3d_solvePnP
2586     *
2587     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
2588     * couple), depending on the number of input points and the chosen method:
2589     * <ul>
2590     *   <li>
2591     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
2592     *   </li>
2593     *   <li>
2594     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
2595     *   </li>
2596     *   <li>
2597     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
2598     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
2599     *   <ul>
2600     *     <li>
2601     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2602     *     </li>
2603     *     <li>
2604     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2605     *     </li>
2606     *     <li>
2607     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2608     *     </li>
2609     *     <li>
2610     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
2611     *     </li>
2612     *   </ul>
2613     *   <li>
2614     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
2615     * Only 1 solution is returned.
2616     *   </li>
2617     * </ul>
2618     *
2619     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2620     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2621     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2622     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2623     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2624     * @param distCoeffs Input vector of distortion coefficients
2625     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2626     * assumed.
2627     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
2628     * the model coordinate system to the camera coordinate system.
2629     * @param tvecs Vector of output translation vectors.
2630     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
2631     * the provided rvec and tvec values as initial approximations of the rotation and translation
2632     * vectors, respectively, and further optimizes them.
2633     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
2634     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
2635     * and useExtrinsicGuess is set to true.
2636     * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
2637     * and useExtrinsicGuess is set to true.
2638     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
2639     * and the 3D object points projected with the estimated pose.
2640     *
2641     * More information is described in REF: calib3d_solvePnP
2642     *
2643     * <b>Note:</b>
2644     * <ul>
2645     *   <li>
2646     *       An example of how to use solvePnP for planar augmented reality can be found at
2647     *         opencv_source_code/samples/python/plane_ar.py
2648     *   </li>
2649     *   <li>
2650     *       If you are using Python:
2651     *   <ul>
2652     *     <li>
2653     *          Numpy array slices won't work as input because solvePnP requires contiguous
2654     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
2655     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
2656     *     </li>
2657     *     <li>
2658     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
2659     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
2660     *         which requires 2-channel information.
2661     *     </li>
2662     *     <li>
2663     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
2664     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
2665     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
2666     *     </li>
2667     *   </ul>
2668     *   <li>
2669     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
2670     *        unstable and sometimes give completely wrong results. If you pass one of these two
2671     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
2672     *   </li>
2673     *   <li>
2674     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
2675     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
2676     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
2677     *   </li>
2678     *   <li>
2679     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
2680     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
2681     *        global solution to converge.
2682     *   </li>
2683     *   <li>
2684     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
2685     *   </li>
2686     *   <li>
2687     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
2688     *        Number of input points must be 4. Object points must be defined in the following order:
2689     *   <ul>
2690     *     <li>
2691     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
2692     *     </li>
2693     *     <li>
2694     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
2695     *     </li>
2696     *     <li>
2697     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
2698     *     </li>
2699     *     <li>
2700     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
2701     *     </li>
2702     *   </ul>
2703     *   </li>
2704     * </ul>
2705     * @return automatically generated
2706     */
2707    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec) {
2708        Mat rvecs_mat = new Mat();
2709        Mat tvecs_mat = new Mat();
2710        int retVal = solvePnPGeneric_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj, tvec.nativeObj);
2711        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
2712        rvecs_mat.release();
2713        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
2714        tvecs_mat.release();
2715        return retVal;
2716    }
2717
2718    /**
2719     * Finds an object pose from 3D-2D point correspondences.
2720     *
2721     * SEE: REF: calib3d_solvePnP
2722     *
2723     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
2724     * couple), depending on the number of input points and the chosen method:
2725     * <ul>
2726     *   <li>
2727     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
2728     *   </li>
2729     *   <li>
2730     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
2731     *   </li>
2732     *   <li>
2733     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
2734     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
2735     *   <ul>
2736     *     <li>
2737     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2738     *     </li>
2739     *     <li>
2740     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2741     *     </li>
2742     *     <li>
2743     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2744     *     </li>
2745     *     <li>
2746     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
2747     *     </li>
2748     *   </ul>
2749     *   <li>
2750     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
2751     * Only 1 solution is returned.
2752     *   </li>
2753     * </ul>
2754     *
2755     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2756     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2757     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2758     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2759     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2760     * @param distCoeffs Input vector of distortion coefficients
2761     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2762     * assumed.
2763     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
2764     * the model coordinate system to the camera coordinate system.
2765     * @param tvecs Vector of output translation vectors.
2766     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
2767     * the provided rvec and tvec values as initial approximations of the rotation and translation
2768     * vectors, respectively, and further optimizes them.
2769     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
2770     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
2771     * and useExtrinsicGuess is set to true.
2772     * and useExtrinsicGuess is set to true.
2773     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
2774     * and the 3D object points projected with the estimated pose.
2775     *
2776     * More information is described in REF: calib3d_solvePnP
2777     *
2778     * <b>Note:</b>
2779     * <ul>
2780     *   <li>
2781     *       An example of how to use solvePnP for planar augmented reality can be found at
2782     *         opencv_source_code/samples/python/plane_ar.py
2783     *   </li>
2784     *   <li>
2785     *       If you are using Python:
2786     *   <ul>
2787     *     <li>
2788     *          Numpy array slices won't work as input because solvePnP requires contiguous
2789     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
2790     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
2791     *     </li>
2792     *     <li>
2793     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
2794     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
2795     *         which requires 2-channel information.
2796     *     </li>
2797     *     <li>
2798     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
2799     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
2800     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
2801     *     </li>
2802     *   </ul>
2803     *   <li>
2804     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
2805     *        unstable and sometimes give completely wrong results. If you pass one of these two
2806     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
2807     *   </li>
2808     *   <li>
2809     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
2810     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
2811     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
2812     *   </li>
2813     *   <li>
2814     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
2815     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
2816     *        global solution to converge.
2817     *   </li>
2818     *   <li>
2819     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
2820     *   </li>
2821     *   <li>
2822     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
2823     *        Number of input points must be 4. Object points must be defined in the following order:
2824     *   <ul>
2825     *     <li>
2826     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
2827     *     </li>
2828     *     <li>
2829     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
2830     *     </li>
2831     *     <li>
2832     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
2833     *     </li>
2834     *     <li>
2835     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
2836     *     </li>
2837     *   </ul>
2838     *   </li>
2839     * </ul>
2840     * @return automatically generated
2841     */
2842    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec) {
2843        Mat rvecs_mat = new Mat();
2844        Mat tvecs_mat = new Mat();
2845        int retVal = solvePnPGeneric_2(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj);
2846        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
2847        rvecs_mat.release();
2848        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
2849        tvecs_mat.release();
2850        return retVal;
2851    }
2852
2853    /**
2854     * Finds an object pose from 3D-2D point correspondences.
2855     *
2856     * SEE: REF: calib3d_solvePnP
2857     *
2858     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
2859     * couple), depending on the number of input points and the chosen method:
2860     * <ul>
2861     *   <li>
2862     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
2863     *   </li>
2864     *   <li>
2865     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
2866     *   </li>
2867     *   <li>
2868     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
2869     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
2870     *   <ul>
2871     *     <li>
2872     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
2873     *     </li>
2874     *     <li>
2875     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
2876     *     </li>
2877     *     <li>
2878     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
2879     *     </li>
2880     *     <li>
2881     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
2882     *     </li>
2883     *   </ul>
2884     *   <li>
2885     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
2886     * Only 1 solution is returned.
2887     *   </li>
2888     * </ul>
2889     *
2890     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
2891     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
2892     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
2893     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
2894     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
2895     * @param distCoeffs Input vector of distortion coefficients
2896     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
2897     * assumed.
2898     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
2899     * the model coordinate system to the camera coordinate system.
2900     * @param tvecs Vector of output translation vectors.
2901     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
2902     * the provided rvec and tvec values as initial approximations of the rotation and translation
2903     * vectors, respectively, and further optimizes them.
2904     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
2905     * and useExtrinsicGuess is set to true.
2906     * and useExtrinsicGuess is set to true.
2907     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
2908     * and the 3D object points projected with the estimated pose.
2909     *
2910     * More information is described in REF: calib3d_solvePnP
2911     *
2912     * <b>Note:</b>
2913     * <ul>
2914     *   <li>
2915     *       An example of how to use solvePnP for planar augmented reality can be found at
2916     *         opencv_source_code/samples/python/plane_ar.py
2917     *   </li>
2918     *   <li>
2919     *       If you are using Python:
2920     *   <ul>
2921     *     <li>
2922     *          Numpy array slices won't work as input because solvePnP requires contiguous
2923     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
2924     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
2925     *     </li>
2926     *     <li>
2927     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
2928     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
2929     *         which requires 2-channel information.
2930     *     </li>
2931     *     <li>
2932     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
2933     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
2934     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
2935     *     </li>
2936     *   </ul>
2937     *   <li>
2938     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
2939     *        unstable and sometimes give completely wrong results. If you pass one of these two
2940     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
2941     *   </li>
2942     *   <li>
2943     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
2944     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
2945     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
2946     *   </li>
2947     *   <li>
2948     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
2949     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
2950     *        global solution to converge.
2951     *   </li>
2952     *   <li>
2953     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
2954     *   </li>
2955     *   <li>
2956     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
2957     *        Number of input points must be 4. Object points must be defined in the following order:
2958     *   <ul>
2959     *     <li>
2960     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
2961     *     </li>
2962     *     <li>
2963     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
2964     *     </li>
2965     *     <li>
2966     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
2967     *     </li>
2968     *     <li>
2969     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
2970     *     </li>
2971     *   </ul>
2972     *   </li>
2973     * </ul>
2974     * @return automatically generated
2975     */
2976    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags) {
2977        Mat rvecs_mat = new Mat();
2978        Mat tvecs_mat = new Mat();
2979        int retVal = solvePnPGeneric_3(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags);
2980        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
2981        rvecs_mat.release();
2982        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
2983        tvecs_mat.release();
2984        return retVal;
2985    }
2986
2987    /**
2988     * Finds an object pose from 3D-2D point correspondences.
2989     *
2990     * SEE: REF: calib3d_solvePnP
2991     *
2992     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
2993     * couple), depending on the number of input points and the chosen method:
2994     * <ul>
2995     *   <li>
2996     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
2997     *   </li>
2998     *   <li>
2999     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
3000     *   </li>
3001     *   <li>
3002     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
3003     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
3004     *   <ul>
3005     *     <li>
3006     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3007     *     </li>
3008     *     <li>
3009     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3010     *     </li>
3011     *     <li>
3012     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3013     *     </li>
3014     *     <li>
3015     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
3016     *     </li>
3017     *   </ul>
3018     *   <li>
3019     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
3020     * Only 1 solution is returned.
3021     *   </li>
3022     * </ul>
3023     *
3024     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
3025     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
3026     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
3027     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
3028     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
3029     * @param distCoeffs Input vector of distortion coefficients
3030     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
3031     * assumed.
3032     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
3033     * the model coordinate system to the camera coordinate system.
3034     * @param tvecs Vector of output translation vectors.
3035     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
3036     * the provided rvec and tvec values as initial approximations of the rotation and translation
3037     * vectors, respectively, and further optimizes them.
3038     * and useExtrinsicGuess is set to true.
3039     * and useExtrinsicGuess is set to true.
3040     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
3041     * and the 3D object points projected with the estimated pose.
3042     *
3043     * More information is described in REF: calib3d_solvePnP
3044     *
3045     * <b>Note:</b>
3046     * <ul>
3047     *   <li>
3048     *       An example of how to use solvePnP for planar augmented reality can be found at
3049     *         opencv_source_code/samples/python/plane_ar.py
3050     *   </li>
3051     *   <li>
3052     *       If you are using Python:
3053     *   <ul>
3054     *     <li>
3055     *          Numpy array slices won't work as input because solvePnP requires contiguous
3056     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
3057     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
3058     *     </li>
3059     *     <li>
3060     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
3061     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
3062     *         which requires 2-channel information.
3063     *     </li>
3064     *     <li>
3065     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
3066     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
3067     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
3068     *     </li>
3069     *   </ul>
3070     *   <li>
3071     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
3072     *        unstable and sometimes give completely wrong results. If you pass one of these two
3073     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
3074     *   </li>
3075     *   <li>
3076     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
3077     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
3078     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
3079     *   </li>
3080     *   <li>
3081     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
3082     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
3083     *        global solution to converge.
3084     *   </li>
3085     *   <li>
3086     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
3087     *   </li>
3088     *   <li>
3089     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
3090     *        Number of input points must be 4. Object points must be defined in the following order:
3091     *   <ul>
3092     *     <li>
3093     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
3094     *     </li>
3095     *     <li>
3096     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
3097     *     </li>
3098     *     <li>
3099     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
3100     *     </li>
3101     *     <li>
3102     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
3103     *     </li>
3104     *   </ul>
3105     *   </li>
3106     * </ul>
3107     * @return automatically generated
3108     */
3109    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess) {
3110        Mat rvecs_mat = new Mat();
3111        Mat tvecs_mat = new Mat();
3112        int retVal = solvePnPGeneric_4(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess);
3113        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3114        rvecs_mat.release();
3115        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3116        tvecs_mat.release();
3117        return retVal;
3118    }
3119
3120    /**
3121     * Finds an object pose from 3D-2D point correspondences.
3122     *
3123     * SEE: REF: calib3d_solvePnP
3124     *
3125     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
3126     * couple), depending on the number of input points and the chosen method:
3127     * <ul>
3128     *   <li>
3129     *  P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
3130     *   </li>
3131     *   <li>
3132     *  REF: SOLVEPNP_IPPE Input points must be &gt;= 4 and object points must be coplanar. Returns 2 solutions.
3133     *   </li>
3134     *   <li>
3135     *  REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
3136     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
3137     *   <ul>
3138     *     <li>
3139     *    point 0: [-squareLength / 2,  squareLength / 2, 0]
3140     *     </li>
3141     *     <li>
3142     *    point 1: [ squareLength / 2,  squareLength / 2, 0]
3143     *     </li>
3144     *     <li>
3145     *    point 2: [ squareLength / 2, -squareLength / 2, 0]
3146     *     </li>
3147     *     <li>
3148     *    point 3: [-squareLength / 2, -squareLength / 2, 0]
3149     *     </li>
3150     *   </ul>
3151     *   <li>
3152     *  for all the other flags, number of input points must be &gt;= 4 and object points can be in any configuration.
3153     * Only 1 solution is returned.
3154     *   </li>
3155     * </ul>
3156     *
3157     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
3158     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can be also passed here.
3159     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
3160     * where N is the number of points. vector&lt;Point2d&gt; can be also passed here.
3161     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
3162     * @param distCoeffs Input vector of distortion coefficients
3163     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
3164     * assumed.
3165     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from
3166     * the model coordinate system to the camera coordinate system.
3167     * @param tvecs Vector of output translation vectors.
3168     * the provided rvec and tvec values as initial approximations of the rotation and translation
3169     * vectors, respectively, and further optimizes them.
3170     * and useExtrinsicGuess is set to true.
3171     * and useExtrinsicGuess is set to true.
3172     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
3173     * and the 3D object points projected with the estimated pose.
3174     *
3175     * More information is described in REF: calib3d_solvePnP
3176     *
3177     * <b>Note:</b>
3178     * <ul>
3179     *   <li>
3180     *       An example of how to use solvePnP for planar augmented reality can be found at
3181     *         opencv_source_code/samples/python/plane_ar.py
3182     *   </li>
3183     *   <li>
3184     *       If you are using Python:
3185     *   <ul>
3186     *     <li>
3187     *          Numpy array slices won't work as input because solvePnP requires contiguous
3188     *         arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
3189     *         modules/calib3d/src/solvepnp.cpp version 2.4.9)
3190     *     </li>
3191     *     <li>
3192     *          The P3P algorithm requires image points to be in an array of shape (N,1,2) due
3193     *         to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
3194     *         which requires 2-channel information.
3195     *     </li>
3196     *     <li>
3197     *          Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
3198     *         it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
3199     *         np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
3200     *     </li>
3201     *   </ul>
3202     *   <li>
3203     *       The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
3204     *        unstable and sometimes give completely wrong results. If you pass one of these two
3205     *        flags, REF: SOLVEPNP_EPNP method will be used instead.
3206     *   </li>
3207     *   <li>
3208     *       The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
3209     *        methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
3210     *        of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
3211     *   </li>
3212     *   <li>
3213     *       With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
3214     *        are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
3215     *        global solution to converge.
3216     *   </li>
3217     *   <li>
3218     *       With REF: SOLVEPNP_IPPE input points must be &gt;= 4 and object points must be coplanar.
3219     *   </li>
3220     *   <li>
3221     *       With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
3222     *        Number of input points must be 4. Object points must be defined in the following order:
3223     *   <ul>
3224     *     <li>
3225     *           point 0: [-squareLength / 2,  squareLength / 2, 0]
3226     *     </li>
3227     *     <li>
3228     *           point 1: [ squareLength / 2,  squareLength / 2, 0]
3229     *     </li>
3230     *     <li>
3231     *           point 2: [ squareLength / 2, -squareLength / 2, 0]
3232     *     </li>
3233     *     <li>
3234     *           point 3: [-squareLength / 2, -squareLength / 2, 0]
3235     *     </li>
3236     *   </ul>
3237     *   </li>
3238     * </ul>
3239     * @return automatically generated
3240     */
3241    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs) {
3242        Mat rvecs_mat = new Mat();
3243        Mat tvecs_mat = new Mat();
3244        int retVal = solvePnPGeneric_5(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
3245        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3246        rvecs_mat.release();
3247        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3248        tvecs_mat.release();
3249        return retVal;
3250    }
3251
3252
3253    //
3254    // C++:  Mat cv::initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.0)
3255    //
3256
3257    /**
3258     * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
3259     *
3260     * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
3261     * coordinate space. In the old interface all the per-view vectors are concatenated. See
3262     * #calibrateCamera for details.
3263     * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
3264     * old interface all the per-view vectors are concatenated.
3265     * @param imageSize Image size in pixels used to initialize the principal point.
3266     * @param aspectRatio If it is zero or negative, both \(f_x\) and \(f_y\) are estimated independently.
3267     * Otherwise, \(f_x = f_y \cdot \texttt{aspectRatio}\) .
3268     *
3269     * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
3270     * Currently, the function only supports planar calibration patterns, which are patterns where each
3271     * object point has z-coordinate =0.
3272     * @return automatically generated
3273     */
3274    public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize, double aspectRatio) {
3275        List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
3276        Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
3277        List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
3278        Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
3279        return new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio));
3280    }
3281
3282    /**
3283     * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
3284     *
3285     * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
3286     * coordinate space. In the old interface all the per-view vectors are concatenated. See
3287     * #calibrateCamera for details.
3288     * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
3289     * old interface all the per-view vectors are concatenated.
3290     * @param imageSize Image size in pixels used to initialize the principal point.
3291     * Otherwise, \(f_x = f_y \cdot \texttt{aspectRatio}\) .
3292     *
3293     * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
3294     * Currently, the function only supports planar calibration patterns, which are patterns where each
3295     * object point has z-coordinate =0.
3296     * @return automatically generated
3297     */
3298    public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize) {
3299        List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
3300        Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
3301        List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
3302        Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
3303        return new Mat(initCameraMatrix2D_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height));
3304    }
3305
3306
3307    //
3308    // C++:  bool cv::findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
3309    //
3310
3311    /**
3312     * Finds the positions of internal corners of the chessboard.
3313     *
3314     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
3315     * @param patternSize Number of inner corners per a chessboard row and column
3316     * ( patternSize = cv::Size(points_per_row,points_per_colum) = cv::Size(columns,rows) ).
3317     * @param corners Output array of detected corners.
3318     * @param flags Various operation flags that can be zero or a combination of the following values:
3319     * <ul>
3320     *   <li>
3321     *    REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
3322     * and white, rather than a fixed threshold level (computed from the average image brightness).
3323     *   </li>
3324     *   <li>
3325     *    REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before
3326     * applying fixed or adaptive thresholding.
3327     *   </li>
3328     *   <li>
3329     *    REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
3330     * square-like shape) to filter out false quads extracted at the contour retrieval stage.
3331     *   </li>
3332     *   <li>
3333     *    REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
3334     * and shortcut the call if none is found. This can drastically speed up the call in the
3335     * degenerate condition when no chessboard is observed.
3336     *   </li>
3337     * </ul>
3338     *
3339     * The function attempts to determine whether the input image is a view of the chessboard pattern and
3340     * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
3341     * are found and they are placed in a certain order (row by row, left to right in every row).
3342     * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
3343     * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
3344     * squares touch each other. The detected coordinates are approximate, and to determine their positions
3345     * more accurately, the function calls #cornerSubPix. You also may use the function #cornerSubPix with
3346     * different parameters if returned coordinates are not accurate enough.
3347     *
3348     * Sample usage of detecting and drawing chessboard corners: :
3349     * <code>
3350     *     Size patternsize(8,6); //interior number of corners
3351     *     Mat gray = ....; //source image
3352     *     vector&lt;Point2f&gt; corners; //this will be filled by the detected corners
3353     *
3354     *     //CALIB_CB_FAST_CHECK saves a lot of time on images
3355     *     //that do not contain any chessboard corners
3356     *     bool patternfound = findChessboardCorners(gray, patternsize, corners,
3357     *             CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
3358     *             + CALIB_CB_FAST_CHECK);
3359     *
3360     *     if(patternfound)
3361     *       cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
3362     *         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
3363     *
3364     *     drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
3365     * </code>
3366     * <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
3367     * the board to make the detection more robust in various environments. Otherwise, if there is no
3368     * border and the background is dark, the outer black squares cannot be segmented properly and so the
3369     * square grouping and ordering algorithm fails.
3370     *
3371     * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.
3372     * @return automatically generated
3373     */
3374    public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, int flags) {
3375        Mat corners_mat = corners;
3376        return findChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, flags);
3377    }
3378
3379    /**
3380     * Finds the positions of internal corners of the chessboard.
3381     *
3382     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
3383     * @param patternSize Number of inner corners per a chessboard row and column
3384     * ( patternSize = cv::Size(points_per_row,points_per_colum) = cv::Size(columns,rows) ).
3385     * @param corners Output array of detected corners.
3386     * <ul>
3387     *   <li>
3388     *    REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
3389     * and white, rather than a fixed threshold level (computed from the average image brightness).
3390     *   </li>
3391     *   <li>
3392     *    REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before
3393     * applying fixed or adaptive thresholding.
3394     *   </li>
3395     *   <li>
3396     *    REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
3397     * square-like shape) to filter out false quads extracted at the contour retrieval stage.
3398     *   </li>
3399     *   <li>
3400     *    REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
3401     * and shortcut the call if none is found. This can drastically speed up the call in the
3402     * degenerate condition when no chessboard is observed.
3403     *   </li>
3404     * </ul>
3405     *
3406     * The function attempts to determine whether the input image is a view of the chessboard pattern and
3407     * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
3408     * are found and they are placed in a certain order (row by row, left to right in every row).
3409     * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
3410     * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
3411     * squares touch each other. The detected coordinates are approximate, and to determine their positions
3412     * more accurately, the function calls #cornerSubPix. You also may use the function #cornerSubPix with
3413     * different parameters if returned coordinates are not accurate enough.
3414     *
3415     * Sample usage of detecting and drawing chessboard corners: :
3416     * <code>
3417     *     Size patternsize(8,6); //interior number of corners
3418     *     Mat gray = ....; //source image
3419     *     vector&lt;Point2f&gt; corners; //this will be filled by the detected corners
3420     *
3421     *     //CALIB_CB_FAST_CHECK saves a lot of time on images
3422     *     //that do not contain any chessboard corners
3423     *     bool patternfound = findChessboardCorners(gray, patternsize, corners,
3424     *             CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
3425     *             + CALIB_CB_FAST_CHECK);
3426     *
3427     *     if(patternfound)
3428     *       cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
3429     *         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
3430     *
3431     *     drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
3432     * </code>
3433     * <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
3434     * the board to make the detection more robust in various environments. Otherwise, if there is no
3435     * border and the background is dark, the outer black squares cannot be segmented properly and so the
3436     * square grouping and ordering algorithm fails.
3437     *
3438     * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.
3439     * @return automatically generated
3440     */
3441    public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners) {
3442        Mat corners_mat = corners;
3443        return findChessboardCorners_1(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj);
3444    }
3445
3446
3447    //
3448    // C++:  bool cv::checkChessboard(Mat img, Size size)
3449    //
3450
3451    public static boolean checkChessboard(Mat img, Size size) {
3452        return checkChessboard_0(img.nativeObj, size.width, size.height);
3453    }
3454
3455
3456    //
3457    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
3458    //
3459
3460    /**
3461     * Finds the positions of internal corners of the chessboard using a sector based approach.
3462     *
3463     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
3464     * @param patternSize Number of inner corners per a chessboard row and column
3465     * ( patternSize = cv::Size(points_per_row,points_per_colum) = cv::Size(columns,rows) ).
3466     * @param corners Output array of detected corners.
3467     * @param flags Various operation flags that can be zero or a combination of the following values:
3468     * <ul>
3469     *   <li>
3470     *    REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
3471     *   </li>
3472     *   <li>
3473     *    REF: CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
3474     *   </li>
3475     *   <li>
3476     *    REF: CALIB_CB_ACCURACY Up sample input image to improve sub-pixel accuracy due to aliasing effects.
3477     *   </li>
3478     *   <li>
3479     *    REF: CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
3480     *   </li>
3481     *   <li>
3482     *    REF: CALIB_CB_MARKER The detected pattern must have a marker (see description).
3483     * This should be used if an accurate camera calibration is required.
3484     *   </li>
3485     * </ul>
3486     * @param meta Optional output arrray of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
3487     * Each entry stands for one corner of the pattern and can have one of the following values:
3488     * <ul>
3489     *   <li>
3490     *    0 = no meta data attached
3491     *   </li>
3492     *   <li>
3493     *    1 = left-top corner of a black cell
3494     *   </li>
3495     *   <li>
3496     *    2 = left-top corner of a white cell
3497     *   </li>
3498     *   <li>
3499     *    3 = left-top corner of a black cell with a white marker dot
3500     *   </li>
3501     *   <li>
3502     *    4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers otherwise first corner)
3503     *   </li>
3504     * </ul>
3505     *
3506     * The function is analog to #findChessboardCorners but uses a localized radon
3507     * transformation approximated by box filters being more robust to all sort of
3508     * noise, faster on larger images and is able to directly return the sub-pixel
3509     * position of the internal chessboard corners. The Method is based on the paper
3510     * CITE: duda2018 "Accurate Detection and Localization of Checkerboard Corners for
3511     * Calibration" demonstrating that the returned sub-pixel positions are more
3512     * accurate than the one returned by cornerSubPix allowing a precise camera
3513     * calibration for demanding applications.
3514     *
3515     * In the case, the flags REF: CALIB_CB_LARGER or REF: CALIB_CB_MARKER are given,
3516     * the result can be recovered from the optional meta array. Both flags are
3517     * helpful to use calibration patterns exceeding the field of view of the camera.
3518     * These oversized patterns allow more accurate calibrations as corners can be
3519     * utilized, which are as close as possible to the image borders.  For a
3520     * consistent coordinate system across all images, the optional marker (see image
3521     * below) can be used to move the origin of the board to the location where the
3522     * black circle is located.
3523     *
3524     * <b>Note:</b> The function requires a white boarder with roughly the same width as one
3525     * of the checkerboard fields around the whole board to improve the detection in
3526     * various environments. In addition, because of the localized radon
3527     * transformation it is beneficial to use round corners for the field corners
3528     * which are located on the outside of the board. The following figure illustrates
3529     * a sample checkerboard optimized for the detection. However, any other checkerboard
3530     * can be used as well.
3531     *
3532     * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.
3533     * ![Checkerboard](pics/checkerboard_radon.png)
3534     * @return automatically generated
3535     */
3536    public static boolean findChessboardCornersSBWithMeta(Mat image, Size patternSize, Mat corners, int flags, Mat meta) {
3537        return findChessboardCornersSBWithMeta_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, flags, meta.nativeObj);
3538    }
3539
3540
3541    //
3542    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
3543    //
3544
3545    public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners, int flags) {
3546        return findChessboardCornersSB_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, flags);
3547    }
3548
3549    public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners) {
3550        return findChessboardCornersSB_1(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj);
3551    }
3552
3553
3554    //
3555    // C++:  Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
3556    //
3557
3558    /**
3559     * Estimates the sharpness of a detected chessboard.
3560     *
3561     * Image sharpness, as well as brightness, are a critical parameter for accuracte
3562     * camera calibration. For accessing these parameters for filtering out
3563     * problematic calibraiton images, this method calculates edge profiles by traveling from
3564     * black to white chessboard cell centers. Based on this, the number of pixels is
3565     * calculated required to transit from black to white. This width of the
3566     * transition area is a good indication of how sharp the chessboard is imaged
3567     * and should be below ~3.0 pixels.
3568     *
3569     * @param image Gray image used to find chessboard corners
3570     * @param patternSize Size of a found chessboard pattern
3571     * @param corners Corners found by #findChessboardCornersSB
3572     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
3573     * @param vertical By default edge responses for horizontal lines are calculated
3574     * @param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
3575     *
3576     * The optional sharpness array is of type CV_32FC1 and has for each calculated
3577     * profile one row with the following five entries:
3578     * 0 = x coordinate of the underlying edge in the image
3579     * 1 = y coordinate of the underlying edge in the image
3580     * 2 = width of the transition area (sharpness)
3581     * 3 = signal strength in the black cell (min brightness)
3582     * 4 = signal strength in the white cell (max brightness)
3583     *
3584     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
3585     */
3586    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance, boolean vertical, Mat sharpness) {
3587        return new Scalar(estimateChessboardSharpness_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance, vertical, sharpness.nativeObj));
3588    }
3589
3590    /**
3591     * Estimates the sharpness of a detected chessboard.
3592     *
3593     * Image sharpness, as well as brightness, are a critical parameter for accuracte
3594     * camera calibration. For accessing these parameters for filtering out
3595     * problematic calibraiton images, this method calculates edge profiles by traveling from
3596     * black to white chessboard cell centers. Based on this, the number of pixels is
3597     * calculated required to transit from black to white. This width of the
3598     * transition area is a good indication of how sharp the chessboard is imaged
3599     * and should be below ~3.0 pixels.
3600     *
3601     * @param image Gray image used to find chessboard corners
3602     * @param patternSize Size of a found chessboard pattern
3603     * @param corners Corners found by #findChessboardCornersSB
3604     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
3605     * @param vertical By default edge responses for horizontal lines are calculated
3606     *
3607     * The optional sharpness array is of type CV_32FC1 and has for each calculated
3608     * profile one row with the following five entries:
3609     * 0 = x coordinate of the underlying edge in the image
3610     * 1 = y coordinate of the underlying edge in the image
3611     * 2 = width of the transition area (sharpness)
3612     * 3 = signal strength in the black cell (min brightness)
3613     * 4 = signal strength in the white cell (max brightness)
3614     *
3615     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
3616     */
3617    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance, boolean vertical) {
3618        return new Scalar(estimateChessboardSharpness_1(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance, vertical));
3619    }
3620
3621    /**
3622     * Estimates the sharpness of a detected chessboard.
3623     *
3624     * Image sharpness, as well as brightness, are a critical parameter for accuracte
3625     * camera calibration. For accessing these parameters for filtering out
3626     * problematic calibraiton images, this method calculates edge profiles by traveling from
3627     * black to white chessboard cell centers. Based on this, the number of pixels is
3628     * calculated required to transit from black to white. This width of the
3629     * transition area is a good indication of how sharp the chessboard is imaged
3630     * and should be below ~3.0 pixels.
3631     *
3632     * @param image Gray image used to find chessboard corners
3633     * @param patternSize Size of a found chessboard pattern
3634     * @param corners Corners found by #findChessboardCornersSB
3635     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
3636     *
3637     * The optional sharpness array is of type CV_32FC1 and has for each calculated
3638     * profile one row with the following five entries:
3639     * 0 = x coordinate of the underlying edge in the image
3640     * 1 = y coordinate of the underlying edge in the image
3641     * 2 = width of the transition area (sharpness)
3642     * 3 = signal strength in the black cell (min brightness)
3643     * 4 = signal strength in the white cell (max brightness)
3644     *
3645     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
3646     */
3647    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance) {
3648        return new Scalar(estimateChessboardSharpness_2(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance));
3649    }
3650
3651    /**
3652     * Estimates the sharpness of a detected chessboard.
3653     *
3654     * Image sharpness, as well as brightness, are a critical parameter for accuracte
3655     * camera calibration. For accessing these parameters for filtering out
3656     * problematic calibraiton images, this method calculates edge profiles by traveling from
3657     * black to white chessboard cell centers. Based on this, the number of pixels is
3658     * calculated required to transit from black to white. This width of the
3659     * transition area is a good indication of how sharp the chessboard is imaged
3660     * and should be below ~3.0 pixels.
3661     *
3662     * @param image Gray image used to find chessboard corners
3663     * @param patternSize Size of a found chessboard pattern
3664     * @param corners Corners found by #findChessboardCornersSB
3665     *
3666     * The optional sharpness array is of type CV_32FC1 and has for each calculated
3667     * profile one row with the following five entries:
3668     * 0 = x coordinate of the underlying edge in the image
3669     * 1 = y coordinate of the underlying edge in the image
3670     * 2 = width of the transition area (sharpness)
3671     * 3 = signal strength in the black cell (min brightness)
3672     * 4 = signal strength in the white cell (max brightness)
3673     *
3674     * @return Scalar(average sharpness, average min brightness, average max brightness,0)
3675     */
3676    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners) {
3677        return new Scalar(estimateChessboardSharpness_3(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj));
3678    }
3679
3680
3681    //
3682    // C++:  bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
3683    //
3684
3685    public static boolean find4QuadCornerSubpix(Mat img, Mat corners, Size region_size) {
3686        return find4QuadCornerSubpix_0(img.nativeObj, corners.nativeObj, region_size.width, region_size.height);
3687    }
3688
3689
3690    //
3691    // C++:  void cv::drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound)
3692    //
3693
3694    /**
3695     * Renders the detected chessboard corners.
3696     *
3697     * @param image Destination image. It must be an 8-bit color image.
3698     * @param patternSize Number of inner corners per a chessboard row and column
3699     * (patternSize = cv::Size(points_per_row,points_per_column)).
3700     * @param corners Array of detected corners, the output of #findChessboardCorners.
3701     * @param patternWasFound Parameter indicating whether the complete board was found or not. The
3702     * return value of #findChessboardCorners should be passed here.
3703     *
3704     * The function draws individual chessboard corners detected either as red circles if the board was not
3705     * found, or as colored corners connected with lines if the board was found.
3706     */
3707    public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound) {
3708        Mat corners_mat = corners;
3709        drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound);
3710    }
3711
3712
3713    //
3714    // C++:  void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
3715    //
3716
3717    /**
3718     * Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP
3719     *
3720     * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
3721     * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
3722     * \(\cameramatrix{A}\)
3723     * @param distCoeffs Input vector of distortion coefficients
3724     * \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.
3725     * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
3726     * the model coordinate system to the camera coordinate system.
3727     * @param tvec Translation vector.
3728     * @param length Length of the painted axes in the same unit than tvec (usually in meters).
3729     * @param thickness Line thickness of the painted axes.
3730     *
3731     * This function draws the axes of the world/object coordinate system w.r.t. to the camera frame.
3732     * OX is drawn in red, OY in green and OZ in blue.
3733     */
3734    public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness) {
3735        drawFrameAxes_0(image.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, length, thickness);
3736    }
3737
3738    /**
3739     * Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP
3740     *
3741     * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
3742     * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
3743     * \(\cameramatrix{A}\)
3744     * @param distCoeffs Input vector of distortion coefficients
3745     * \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.
3746     * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
3747     * the model coordinate system to the camera coordinate system.
3748     * @param tvec Translation vector.
3749     * @param length Length of the painted axes in the same unit than tvec (usually in meters).
3750     *
3751     * This function draws the axes of the world/object coordinate system w.r.t. to the camera frame.
3752     * OX is drawn in red, OY in green and OZ in blue.
3753     */
3754    public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length) {
3755        drawFrameAxes_1(image.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, length);
3756    }
3757
3758
3759    //
3760    // C++:  bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags, Ptr_FeatureDetector blobDetector, CirclesGridFinderParameters parameters)
3761    //
3762
3763    // Unknown type 'Ptr_FeatureDetector' (I), skipping the function
3764
3765
3766    //
3767    // C++:  bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = SimpleBlobDetector::create())
3768    //
3769
3770    public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers, int flags) {
3771        return findCirclesGrid_0(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj, flags);
3772    }
3773
3774    public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers) {
3775        return findCirclesGrid_2(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj);
3776    }
3777
3778
3779    //
3780    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
3781    //
3782
3783    /**
3784     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
3785     * pattern.
3786     *
3787     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
3788     * the calibration pattern coordinate space (e.g. std::vector&lt;std::vector&lt;cv::Vec3f&gt;&gt;). The outer
3789     * vector contains as many elements as the number of pattern views. If the same calibration pattern
3790     * is shown in each view and it is fully visible, all the vectors will be the same. Although, it is
3791     * possible to use partially occluded patterns or even different patterns in different views. Then,
3792     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
3793     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
3794     * In the old interface all the vectors of object points from different views are concatenated
3795     * together.
3796     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
3797     * pattern points (e.g. std::vector&lt;std::vector&lt;cv::Vec2f&gt;&gt;). imagePoints.size() and
3798     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
3799     * respectively. In the old interface all the vectors of object points from different views are
3800     * concatenated together.
3801     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
3802     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
3803     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
3804     * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
3805     * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
3806     * @param distCoeffs Input/output vector of distortion coefficients
3807     * \(\distcoeffs\).
3808     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
3809     * (e.g. std::vector&lt;cv::Mat&gt;&gt;). That is, each i-th rotation vector together with the corresponding
3810     * i-th translation vector (see the next output parameter description) brings the calibration pattern
3811     * from the object coordinate space (in which object points are specified) to the camera coordinate
3812     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
3813     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
3814     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
3815     * space.
3816     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
3817     * describtion above.
3818     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
3819     * parameters. Order of deviations values:
3820     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
3821     *  s_4, \tau_x, \tau_y)\) If one of parameters is not estimated, it's deviation is equals to zero.
3822     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
3823     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
3824     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
3825     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
3826     * @param flags Different flags that may be zero or a combination of the following values:
3827     * <ul>
3828     *   <li>
3829     *    REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
3830     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
3831     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
3832     * Note, that if intrinsic parameters are known, there is no need to use this function just to
3833     * estimate extrinsic parameters. Use REF: solvePnP instead.
3834     *   </li>
3835     *   <li>
3836     *    REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
3837     * optimization. It stays at the center or at a different location specified when
3838     *  REF: CALIB_USE_INTRINSIC_GUESS is set too.
3839     *   </li>
3840     *   <li>
3841     *    REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
3842     * ratio fx/fy stays the same as in the input cameraMatrix . When
3843     *  REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
3844     * ignored, only their ratio is computed and used further.
3845     *   </li>
3846     *   <li>
3847     *    REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
3848     * to zeros and stay zero.
3849     *   </li>
3850     *   <li>
3851     *    REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
3852     *  REF: CALIB_USE_INTRINSIC_GUESS is set.
3853     *   </li>
3854     *   <li>
3855     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
3856     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
3857     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
3858     *   </li>
3859     *   <li>
3860     *    REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
3861     * backward compatibility, this extra flag should be explicitly specified to make the
3862     * calibration function use the rational model and return 8 coefficients or more.
3863     *   </li>
3864     *   <li>
3865     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
3866     * backward compatibility, this extra flag should be explicitly specified to make the
3867     * calibration function use the thin prism model and return 12 coefficients or more.
3868     *   </li>
3869     *   <li>
3870     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
3871     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
3872     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
3873     *   </li>
3874     *   <li>
3875     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
3876     * backward compatibility, this extra flag should be explicitly specified to make the
3877     * calibration function use the tilted sensor model and return 14 coefficients.
3878     *   </li>
3879     *   <li>
3880     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
3881     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
3882     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
3883     *   </li>
3884     * </ul>
3885     * @param criteria Termination criteria for the iterative optimization algorithm.
3886     *
3887     * @return the overall RMS re-projection error.
3888     *
3889     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
3890     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
3891     * points and their corresponding 2D projections in each view must be specified. That may be achieved
3892     * by using an object with known geometry and easily detectable feature points. Such an object is
3893     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
3894     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
3895     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
3896     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
3897     * be used as long as initial cameraMatrix is provided.
3898     *
3899     * The algorithm performs the following steps:
3900     *
3901     * <ul>
3902     *   <li>
3903     *    Compute the initial intrinsic parameters (the option only available for planar calibration
3904     *     patterns) or read them from the input parameters. The distortion coefficients are all set to
3905     *     zeros initially unless some of CALIB_FIX_K? are specified.
3906     *   </li>
3907     * </ul>
3908     *
3909     * <ul>
3910     *   <li>
3911     *    Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
3912     *     done using REF: solvePnP .
3913     *   </li>
3914     * </ul>
3915     *
3916     * <ul>
3917     *   <li>
3918     *    Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
3919     *     that is, the total sum of squared distances between the observed feature points imagePoints and
3920     *     the projected (using the current estimates for camera parameters and the poses) object points
3921     *     objectPoints. See REF: projectPoints for details.
3922     *   </li>
3923     * </ul>
3924     *
3925     * <b>Note:</b>
3926     *     If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
3927     *     and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
3928     *     \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
3929     *     \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
3930     *     instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
3931     *
3932     * SEE:
3933     *    calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
3934     *    undistort
3935     */
3936    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags, TermCriteria criteria) {
3937        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
3938        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
3939        Mat rvecs_mat = new Mat();
3940        Mat tvecs_mat = new Mat();
3941        double retVal = calibrateCameraExtended_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
3942        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
3943        rvecs_mat.release();
3944        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
3945        tvecs_mat.release();
3946        return retVal;
3947    }
3948
3949    /**
3950     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
3951     * pattern.
3952     *
3953     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
3954     * the calibration pattern coordinate space (e.g. std::vector&lt;std::vector&lt;cv::Vec3f&gt;&gt;). The outer
3955     * vector contains as many elements as the number of pattern views. If the same calibration pattern
3956     * is shown in each view and it is fully visible, all the vectors will be the same. Although, it is
3957     * possible to use partially occluded patterns or even different patterns in different views. Then,
3958     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
3959     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
3960     * In the old interface all the vectors of object points from different views are concatenated
3961     * together.
3962     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
3963     * pattern points (e.g. std::vector&lt;std::vector&lt;cv::Vec2f&gt;&gt;). imagePoints.size() and
3964     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
3965     * respectively. In the old interface all the vectors of object points from different views are
3966     * concatenated together.
3967     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
3968     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
3969     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
3970     * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
3971     * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
3972     * @param distCoeffs Input/output vector of distortion coefficients
3973     * \(\distcoeffs\).
3974     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
3975     * (e.g. std::vector&lt;cv::Mat&gt;&gt;). That is, each i-th rotation vector together with the corresponding
3976     * i-th translation vector (see the next output parameter description) brings the calibration pattern
3977     * from the object coordinate space (in which object points are specified) to the camera coordinate
3978     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
3979     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
3980     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
3981     * space.
3982     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
3983     * describtion above.
3984     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
3985     * parameters. Order of deviations values:
3986     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
3987     *  s_4, \tau_x, \tau_y)\) If one of parameters is not estimated, it's deviation is equals to zero.
3988     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
3989     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
3990     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
3991     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
3992     * @param flags Different flags that may be zero or a combination of the following values:
3993     * <ul>
3994     *   <li>
3995     *    REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
3996     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
3997     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
3998     * Note, that if intrinsic parameters are known, there is no need to use this function just to
3999     * estimate extrinsic parameters. Use REF: solvePnP instead.
4000     *   </li>
4001     *   <li>
4002     *    REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
4003     * optimization. It stays at the center or at a different location specified when
4004     *  REF: CALIB_USE_INTRINSIC_GUESS is set too.
4005     *   </li>
4006     *   <li>
4007     *    REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
4008     * ratio fx/fy stays the same as in the input cameraMatrix . When
4009     *  REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
4010     * ignored, only their ratio is computed and used further.
4011     *   </li>
4012     *   <li>
4013     *    REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
4014     * to zeros and stay zero.
4015     *   </li>
4016     *   <li>
4017     *    REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
4018     *  REF: CALIB_USE_INTRINSIC_GUESS is set.
4019     *   </li>
4020     *   <li>
4021     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
4022     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
4023     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4024     *   </li>
4025     *   <li>
4026     *    REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
4027     * backward compatibility, this extra flag should be explicitly specified to make the
4028     * calibration function use the rational model and return 8 coefficients or more.
4029     *   </li>
4030     *   <li>
4031     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
4032     * backward compatibility, this extra flag should be explicitly specified to make the
4033     * calibration function use the thin prism model and return 12 coefficients or more.
4034     *   </li>
4035     *   <li>
4036     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
4037     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4038     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4039     *   </li>
4040     *   <li>
4041     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
4042     * backward compatibility, this extra flag should be explicitly specified to make the
4043     * calibration function use the tilted sensor model and return 14 coefficients.
4044     *   </li>
4045     *   <li>
4046     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
4047     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4048     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4049     *   </li>
4050     * </ul>
4051     *
4052     * @return the overall RMS re-projection error.
4053     *
4054     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4055     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
4056     * points and their corresponding 2D projections in each view must be specified. That may be achieved
4057     * by using an object with known geometry and easily detectable feature points. Such an object is
4058     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
4059     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
4060     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
4061     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
4062     * be used as long as initial cameraMatrix is provided.
4063     *
4064     * The algorithm performs the following steps:
4065     *
4066     * <ul>
4067     *   <li>
4068     *    Compute the initial intrinsic parameters (the option only available for planar calibration
4069     *     patterns) or read them from the input parameters. The distortion coefficients are all set to
4070     *     zeros initially unless some of CALIB_FIX_K? are specified.
4071     *   </li>
4072     * </ul>
4073     *
4074     * <ul>
4075     *   <li>
4076     *    Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
4077     *     done using REF: solvePnP .
4078     *   </li>
4079     * </ul>
4080     *
4081     * <ul>
4082     *   <li>
4083     *    Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
4084     *     that is, the total sum of squared distances between the observed feature points imagePoints and
4085     *     the projected (using the current estimates for camera parameters and the poses) object points
4086     *     objectPoints. See REF: projectPoints for details.
4087     *   </li>
4088     * </ul>
4089     *
4090     * <b>Note:</b>
4091     *     If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
4092     *     and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
4093     *     \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
4094     *     \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
4095     *     instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
4096     *
4097     * SEE:
4098     *    calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
4099     *    undistort
4100     */
4101    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags) {
4102        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4103        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4104        Mat rvecs_mat = new Mat();
4105        Mat tvecs_mat = new Mat();
4106        double retVal = calibrateCameraExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags);
4107        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4108        rvecs_mat.release();
4109        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4110        tvecs_mat.release();
4111        return retVal;
4112    }
4113
4114    /**
4115     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
4116     * pattern.
4117     *
4118     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
4119     * the calibration pattern coordinate space (e.g. std::vector&lt;std::vector&lt;cv::Vec3f&gt;&gt;). The outer
4120     * vector contains as many elements as the number of pattern views. If the same calibration pattern
4121     * is shown in each view and it is fully visible, all the vectors will be the same. Although, it is
4122     * possible to use partially occluded patterns or even different patterns in different views. Then,
4123     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
4124     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
4125     * In the old interface all the vectors of object points from different views are concatenated
4126     * together.
4127     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
4128     * pattern points (e.g. std::vector&lt;std::vector&lt;cv::Vec2f&gt;&gt;). imagePoints.size() and
4129     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
4130     * respectively. In the old interface all the vectors of object points from different views are
4131     * concatenated together.
4132     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
4133     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
4134     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
4135     * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
4136     * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
4137     * @param distCoeffs Input/output vector of distortion coefficients
4138     * \(\distcoeffs\).
4139     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
4140     * (e.g. std::vector&lt;cv::Mat&gt;&gt;). That is, each i-th rotation vector together with the corresponding
4141     * i-th translation vector (see the next output parameter description) brings the calibration pattern
4142     * from the object coordinate space (in which object points are specified) to the camera coordinate
4143     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
4144     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
4145     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
4146     * space.
4147     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
4148     * describtion above.
4149     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
4150     * parameters. Order of deviations values:
4151     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
4152     *  s_4, \tau_x, \tau_y)\) If one of parameters is not estimated, it's deviation is equals to zero.
4153     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
4154     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
4155     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
4156     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4157     * <ul>
4158     *   <li>
4159     *    REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
4160     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
4161     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
4162     * Note, that if intrinsic parameters are known, there is no need to use this function just to
4163     * estimate extrinsic parameters. Use REF: solvePnP instead.
4164     *   </li>
4165     *   <li>
4166     *    REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
4167     * optimization. It stays at the center or at a different location specified when
4168     *  REF: CALIB_USE_INTRINSIC_GUESS is set too.
4169     *   </li>
4170     *   <li>
4171     *    REF: CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
4172     * ratio fx/fy stays the same as in the input cameraMatrix . When
4173     *  REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
4174     * ignored, only their ratio is computed and used further.
4175     *   </li>
4176     *   <li>
4177     *    REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
4178     * to zeros and stay zero.
4179     *   </li>
4180     *   <li>
4181     *    REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
4182     *  REF: CALIB_USE_INTRINSIC_GUESS is set.
4183     *   </li>
4184     *   <li>
4185     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
4186     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
4187     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4188     *   </li>
4189     *   <li>
4190     *    REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
4191     * backward compatibility, this extra flag should be explicitly specified to make the
4192     * calibration function use the rational model and return 8 coefficients or more.
4193     *   </li>
4194     *   <li>
4195     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
4196     * backward compatibility, this extra flag should be explicitly specified to make the
4197     * calibration function use the thin prism model and return 12 coefficients or more.
4198     *   </li>
4199     *   <li>
4200     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
4201     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4202     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4203     *   </li>
4204     *   <li>
4205     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
4206     * backward compatibility, this extra flag should be explicitly specified to make the
4207     * calibration function use the tilted sensor model and return 14 coefficients.
4208     *   </li>
4209     *   <li>
4210     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
4211     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4212     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4213     *   </li>
4214     * </ul>
4215     *
4216     * @return the overall RMS re-projection error.
4217     *
4218     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4219     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
4220     * points and their corresponding 2D projections in each view must be specified. That may be achieved
4221     * by using an object with known geometry and easily detectable feature points. Such an object is
4222     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
4223     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
4224     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
4225     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
4226     * be used as long as initial cameraMatrix is provided.
4227     *
4228     * The algorithm performs the following steps:
4229     *
4230     * <ul>
4231     *   <li>
4232     *    Compute the initial intrinsic parameters (the option only available for planar calibration
4233     *     patterns) or read them from the input parameters. The distortion coefficients are all set to
4234     *     zeros initially unless some of CALIB_FIX_K? are specified.
4235     *   </li>
4236     * </ul>
4237     *
4238     * <ul>
4239     *   <li>
4240     *    Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
4241     *     done using REF: solvePnP .
4242     *   </li>
4243     * </ul>
4244     *
4245     * <ul>
4246     *   <li>
4247     *    Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
4248     *     that is, the total sum of squared distances between the observed feature points imagePoints and
4249     *     the projected (using the current estimates for camera parameters and the poses) object points
4250     *     objectPoints. See REF: projectPoints for details.
4251     *   </li>
4252     * </ul>
4253     *
4254     * <b>Note:</b>
4255     *     If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
4256     *     and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
4257     *     \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
4258     *     \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
4259     *     instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
4260     *
4261     * SEE:
4262     *    calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
4263     *    undistort
4264     */
4265    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors) {
4266        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4267        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4268        Mat rvecs_mat = new Mat();
4269        Mat tvecs_mat = new Mat();
4270        double retVal = calibrateCameraExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj);
4271        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4272        rvecs_mat.release();
4273        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4274        tvecs_mat.release();
4275        return retVal;
4276    }
4277
4278
4279    //
4280    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
4281    //
4282
4283    public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) {
4284        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4285        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4286        Mat rvecs_mat = new Mat();
4287        Mat tvecs_mat = new Mat();
4288        double retVal = calibrateCamera_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
4289        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4290        rvecs_mat.release();
4291        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4292        tvecs_mat.release();
4293        return retVal;
4294    }
4295
4296    public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags) {
4297        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4298        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4299        Mat rvecs_mat = new Mat();
4300        Mat tvecs_mat = new Mat();
4301        double retVal = calibrateCamera_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
4302        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4303        rvecs_mat.release();
4304        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4305        tvecs_mat.release();
4306        return retVal;
4307    }
4308
4309    public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs) {
4310        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4311        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4312        Mat rvecs_mat = new Mat();
4313        Mat tvecs_mat = new Mat();
4314        double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
4315        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4316        rvecs_mat.release();
4317        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4318        tvecs_mat.release();
4319        return retVal;
4320    }
4321
4322
4323    //
4324    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
4325    //
4326
4327    /**
4328     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
4329     *
4330     * This function is an extension of #calibrateCamera with the method of releasing object which was
4331     * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
4332     * targets (calibration plates), this method can dramatically improve the precision of the estimated
4333     * camera parameters. Both the object-releasing method and standard method are supported by this
4334     * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
4335     * #calibrateCamera is a wrapper for this function.
4336     *
4337     * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
4338     * coordinate space. See #calibrateCamera for details. If the method of releasing object to be used,
4339     * the identical calibration board must be used in each view and it must be fully visible, and all
4340     * objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
4341     * target has to be rigid, or at least static if the camera (rather than the calibration target) is
4342     * shifted for grabbing images.</b>
4343     * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
4344     * #calibrateCamera for details.
4345     * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
4346     * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
4347     * a switch for calibration method selection. If object-releasing method to be used, pass in the
4348     * parameter in the range of [1, objectPoints[0].size()-2], otherwise a value out of this range will
4349     * make standard calibration method selected. Usually the top-right corner point of the calibration
4350     * board grid is recommended to be fixed when object-releasing method being utilized. According to
4351     * \cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
4352     * and objectPoints[0].back.z are used. With object-releasing method, accurate rvecs, tvecs and
4353     * newObjPoints are only possible if coordinates of these three fixed points are accurate enough.
4354     * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
4355     * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
4356     * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
4357     * for details.
4358     * @param tvecs Output vector of translation vectors estimated for each pattern view.
4359     * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
4360     * be scaled based on three fixed points. The returned coordinates are accurate only if the above
4361     * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
4362     * is ignored with standard calibration method.
4363     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
4364     * See #calibrateCamera for details.
4365     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
4366     * See #calibrateCamera for details.
4367     * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
4368     * of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
4369     * parameter is ignored with standard calibration method.
4370     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4371     * @param flags Different flags that may be zero or a combination of some predefined values. See
4372     * #calibrateCamera for details. If the method of releasing object is used, the calibration time may
4373     * be much longer. CALIB_USE_QR or CALIB_USE_LU could be used for faster calibration with potentially
4374     * less precise and less stable in some rare cases.
4375     * @param criteria Termination criteria for the iterative optimization algorithm.
4376     *
4377     * @return the overall RMS re-projection error.
4378     *
4379     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4380     * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
4381     * #calibrateCamera for other detailed explanations.
4382     * SEE:
4383     *    calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
4384     */
4385    public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags, TermCriteria criteria) {
4386        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4387        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4388        Mat rvecs_mat = new Mat();
4389        Mat tvecs_mat = new Mat();
4390        double retVal = calibrateCameraROExtended_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
4391        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4392        rvecs_mat.release();
4393        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4394        tvecs_mat.release();
4395        return retVal;
4396    }
4397
4398    /**
4399     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
4400     *
4401     * This function is an extension of #calibrateCamera with the method of releasing object which was
4402     * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
4403     * targets (calibration plates), this method can dramatically improve the precision of the estimated
4404     * camera parameters. Both the object-releasing method and standard method are supported by this
4405     * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
4406     * #calibrateCamera is a wrapper for this function.
4407     *
4408     * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
4409     * coordinate space. See #calibrateCamera for details. If the method of releasing object to be used,
4410     * the identical calibration board must be used in each view and it must be fully visible, and all
4411     * objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
4412     * target has to be rigid, or at least static if the camera (rather than the calibration target) is
4413     * shifted for grabbing images.</b>
4414     * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
4415     * #calibrateCamera for details.
4416     * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
4417     * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
4418     * a switch for calibration method selection. If object-releasing method to be used, pass in the
4419     * parameter in the range of [1, objectPoints[0].size()-2], otherwise a value out of this range will
4420     * make standard calibration method selected. Usually the top-right corner point of the calibration
4421     * board grid is recommended to be fixed when object-releasing method being utilized. According to
4422     * \cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
4423     * and objectPoints[0].back.z are used. With object-releasing method, accurate rvecs, tvecs and
4424     * newObjPoints are only possible if coordinates of these three fixed points are accurate enough.
4425     * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
4426     * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
4427     * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
4428     * for details.
4429     * @param tvecs Output vector of translation vectors estimated for each pattern view.
4430     * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
4431     * be scaled based on three fixed points. The returned coordinates are accurate only if the above
4432     * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
4433     * is ignored with standard calibration method.
4434     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
4435     * See #calibrateCamera for details.
4436     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
4437     * See #calibrateCamera for details.
4438     * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
4439     * of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
4440     * parameter is ignored with standard calibration method.
4441     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4442     * @param flags Different flags that may be zero or a combination of some predefined values. See
4443     * #calibrateCamera for details. If the method of releasing object is used, the calibration time may
4444     * be much longer. CALIB_USE_QR or CALIB_USE_LU could be used for faster calibration with potentially
4445     * less precise and less stable in some rare cases.
4446     *
4447     * @return the overall RMS re-projection error.
4448     *
4449     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4450     * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
4451     * #calibrateCamera for other detailed explanations.
4452     * SEE:
4453     *    calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
4454     */
4455    public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags) {
4456        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4457        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4458        Mat rvecs_mat = new Mat();
4459        Mat tvecs_mat = new Mat();
4460        double retVal = calibrateCameraROExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj, flags);
4461        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4462        rvecs_mat.release();
4463        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4464        tvecs_mat.release();
4465        return retVal;
4466    }
4467
4468    /**
4469     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
4470     *
4471     * This function is an extension of #calibrateCamera with the method of releasing object which was
4472     * proposed in CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
4473     * targets (calibration plates), this method can dramatically improve the precision of the estimated
4474     * camera parameters. Both the object-releasing method and standard method are supported by this
4475     * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
4476     * #calibrateCamera is a wrapper for this function.
4477     *
4478     * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
4479     * coordinate space. See #calibrateCamera for details. If the method of releasing object to be used,
4480     * the identical calibration board must be used in each view and it must be fully visible, and all
4481     * objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
4482     * target has to be rigid, or at least static if the camera (rather than the calibration target) is
4483     * shifted for grabbing images.</b>
4484     * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
4485     * #calibrateCamera for details.
4486     * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
4487     * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
4488     * a switch for calibration method selection. If object-releasing method to be used, pass in the
4489     * parameter in the range of [1, objectPoints[0].size()-2], otherwise a value out of this range will
4490     * make standard calibration method selected. Usually the top-right corner point of the calibration
4491     * board grid is recommended to be fixed when object-releasing method being utilized. According to
4492     * \cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
4493     * and objectPoints[0].back.z are used. With object-releasing method, accurate rvecs, tvecs and
4494     * newObjPoints are only possible if coordinates of these three fixed points are accurate enough.
4495     * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
4496     * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
4497     * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
4498     * for details.
4499     * @param tvecs Output vector of translation vectors estimated for each pattern view.
4500     * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
4501     * be scaled based on three fixed points. The returned coordinates are accurate only if the above
4502     * mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
4503     * is ignored with standard calibration method.
4504     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
4505     * See #calibrateCamera for details.
4506     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
4507     * See #calibrateCamera for details.
4508     * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
4509     * of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
4510     * parameter is ignored with standard calibration method.
4511     *  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4512     * #calibrateCamera for details. If the method of releasing object is used, the calibration time may
4513     * be much longer. CALIB_USE_QR or CALIB_USE_LU could be used for faster calibration with potentially
4514     * less precise and less stable in some rare cases.
4515     *
4516     * @return the overall RMS re-projection error.
4517     *
4518     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4519     * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
4520     * #calibrateCamera for other detailed explanations.
4521     * SEE:
4522     *    calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
4523     */
4524    public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors) {
4525        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4526        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4527        Mat rvecs_mat = new Mat();
4528        Mat tvecs_mat = new Mat();
4529        double retVal = calibrateCameraROExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj);
4530        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4531        rvecs_mat.release();
4532        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4533        tvecs_mat.release();
4534        return retVal;
4535    }
4536
4537
4538    //
4539    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
4540    //
4541
4542    public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags, TermCriteria criteria) {
4543        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4544        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4545        Mat rvecs_mat = new Mat();
4546        Mat tvecs_mat = new Mat();
4547        double retVal = calibrateCameraRO_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
4548        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4549        rvecs_mat.release();
4550        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4551        tvecs_mat.release();
4552        return retVal;
4553    }
4554
4555    public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags) {
4556        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4557        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4558        Mat rvecs_mat = new Mat();
4559        Mat tvecs_mat = new Mat();
4560        double retVal = calibrateCameraRO_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, flags);
4561        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4562        rvecs_mat.release();
4563        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4564        tvecs_mat.release();
4565        return retVal;
4566    }
4567
4568    public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints) {
4569        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4570        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4571        Mat rvecs_mat = new Mat();
4572        Mat tvecs_mat = new Mat();
4573        double retVal = calibrateCameraRO_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj);
4574        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4575        rvecs_mat.release();
4576        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4577        tvecs_mat.release();
4578        return retVal;
4579    }
4580
4581
4582    //
4583    // C++:  void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
4584    //
4585
4586    /**
4587     * Computes useful camera characteristics from the camera intrinsic matrix.
4588     *
4589     * @param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or
4590     * #stereoCalibrate .
4591     * @param imageSize Input image size in pixels.
4592     * @param apertureWidth Physical width in mm of the sensor.
4593     * @param apertureHeight Physical height in mm of the sensor.
4594     * @param fovx Output field of view in degrees along the horizontal sensor axis.
4595     * @param fovy Output field of view in degrees along the vertical sensor axis.
4596     * @param focalLength Focal length of the lens in mm.
4597     * @param principalPoint Principal point in mm.
4598     * @param aspectRatio \(f_y/f_x\)
4599     *
4600     * The function computes various useful camera characteristics from the previously estimated camera
4601     * matrix.
4602     *
4603     * <b>Note:</b>
4604     *    Do keep in mind that the unity measure 'mm' stands for whatever unit of measure one chooses for
4605     *     the chessboard pitch (it can thus be any value).
4606     */
4607    public static void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double[] fovx, double[] fovy, double[] focalLength, Point principalPoint, double[] aspectRatio) {
4608        double[] fovx_out = new double[1];
4609        double[] fovy_out = new double[1];
4610        double[] focalLength_out = new double[1];
4611        double[] principalPoint_out = new double[2];
4612        double[] aspectRatio_out = new double[1];
4613        calibrationMatrixValues_0(cameraMatrix.nativeObj, imageSize.width, imageSize.height, apertureWidth, apertureHeight, fovx_out, fovy_out, focalLength_out, principalPoint_out, aspectRatio_out);
4614        if(fovx!=null) fovx[0] = (double)fovx_out[0];
4615        if(fovy!=null) fovy[0] = (double)fovy_out[0];
4616        if(focalLength!=null) focalLength[0] = (double)focalLength_out[0];
4617        if(principalPoint!=null){ principalPoint.x = principalPoint_out[0]; principalPoint.y = principalPoint_out[1]; } 
4618        if(aspectRatio!=null) aspectRatio[0] = (double)aspectRatio_out[0];
4619    }
4620
4621
4622    //
4623    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, vector_Mat& rvecs, vector_Mat& tvecs, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
4624    //
4625
4626    /**
4627     * Calibrates a stereo camera set up. This function finds the intrinsic parameters
4628     * for each of the two cameras and the extrinsic parameters between the two cameras.
4629     *
4630     * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
4631     * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
4632     * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
4633     * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
4634     * be equal for each i.
4635     * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
4636     * observed by the first camera. The same structure as in REF: calibrateCamera.
4637     * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
4638     * observed by the second camera. The same structure as in REF: calibrateCamera.
4639     * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
4640     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
4641     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
4642     * REF: calibrateCamera.
4643     * @param cameraMatrix2 Input/output second camera intrinsic matrix for the second camera. See description for
4644     * cameraMatrix1.
4645     * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
4646     * description for distCoeffs1.
4647     * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
4648     * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
4649     * points given in the first camera's coordinate system to points in the second camera's
4650     * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
4651     * from the first camera's coordinate system to the second camera's coordinate system. Due to its
4652     * duality, this tuple is equivalent to the position of the first camera with respect to the
4653     * second camera coordinate system.
4654     * @param T Output translation vector, see description above.
4655     * @param E Output essential matrix.
4656     * @param F Output fundamental matrix.
4657     * @param rvecs Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
4658     * coordinate system of the first camera of the stereo pair (e.g. std::vector&lt;cv::Mat&gt;). More in detail, each
4659     * i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
4660     * description) brings the calibration pattern from the object coordinate space (in which object points are
4661     * specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
4662     * the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
4663     * to camera coordinate space of the first camera of the stereo pair.
4664     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
4665     * of previous output parameter ( rvecs ).
4666     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4667     * @param flags Different flags that may be zero or a combination of the following values:
4668     * <ul>
4669     *   <li>
4670     *    REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
4671     * matrices are estimated.
4672     *   </li>
4673     *   <li>
4674     *    REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
4675     * according to the specified flags. Initial values are provided by the user.
4676     *   </li>
4677     *   <li>
4678     *    REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
4679     * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
4680     *   </li>
4681     *   <li>
4682     *    REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
4683     *   </li>
4684     *   <li>
4685     *    REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
4686     *   </li>
4687     *   <li>
4688     *    REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
4689     * .
4690     *   </li>
4691     *   <li>
4692     *    REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
4693     *   </li>
4694     *   <li>
4695     *    REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
4696     * zeros and fix there.
4697     *   </li>
4698     *   <li>
4699     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
4700     * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
4701     * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4702     *   </li>
4703     *   <li>
4704     *    REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
4705     * compatibility, this extra flag should be explicitly specified to make the calibration
4706     * function use the rational model and return 8 coefficients. If the flag is not set, the
4707     * function computes and returns only 5 distortion coefficients.
4708     *   </li>
4709     *   <li>
4710     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
4711     * backward compatibility, this extra flag should be explicitly specified to make the
4712     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
4713     * set, the function computes and returns only 5 distortion coefficients.
4714     *   </li>
4715     *   <li>
4716     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
4717     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4718     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4719     *   </li>
4720     *   <li>
4721     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
4722     * backward compatibility, this extra flag should be explicitly specified to make the
4723     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
4724     * set, the function computes and returns only 5 distortion coefficients.
4725     *   </li>
4726     *   <li>
4727     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
4728     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4729     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4730     *   </li>
4731     * </ul>
4732     * @param criteria Termination criteria for the iterative optimization algorithm.
4733     *
4734     * The function estimates the transformation between two cameras making a stereo pair. If one computes
4735     * the poses of an object relative to the first camera and to the second camera,
4736     * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
4737     * relative position and orientation between the two cameras are fixed, then those poses definitely
4738     * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
4739     * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
4740     * given. This is what the described function does. It computes (\(R\),\(T\)) such that:
4741     *
4742     * \(R_2=R R_1\)
4743     * \(T_2=R T_1 + T.\)
4744     *
4745     * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
4746     * coordinate system when given the point's coordinate representation in the first camera's coordinate
4747     * system:
4748     *
4749     * \(\begin{bmatrix}
4750     * X_2 \\
4751     * Y_2 \\
4752     * Z_2 \\
4753     * 1
4754     * \end{bmatrix} = \begin{bmatrix}
4755     * R &amp; T \\
4756     * 0 &amp; 1
4757     * \end{bmatrix} \begin{bmatrix}
4758     * X_1 \\
4759     * Y_1 \\
4760     * Z_1 \\
4761     * 1
4762     * \end{bmatrix}.\)
4763     *
4764     *
4765     * Optionally, it computes the essential matrix E:
4766     *
4767     * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
4768     *
4769     * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
4770     * And the function can also compute the fundamental matrix F:
4771     *
4772     * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
4773     *
4774     * Besides the stereo-related information, the function can also perform a full calibration of each of
4775     * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
4776     * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
4777     * estimated with high accuracy for each of the cameras individually (for example, using
4778     * #calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
4779     * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
4780     * estimated at once, it makes sense to restrict some parameters, for example, pass
4781     *  REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
4782     * reasonable assumption.
4783     *
4784     * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
4785     * points in all the available views from both cameras. The function returns the final value of the
4786     * re-projection error.
4787     * @return automatically generated
4788     */
4789    public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, List<Mat> rvecs, List<Mat> tvecs, Mat perViewErrors, int flags, TermCriteria criteria) {
4790        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4791        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
4792        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
4793        Mat rvecs_mat = new Mat();
4794        Mat tvecs_mat = new Mat();
4795        double retVal = stereoCalibrateExtended_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
4796        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4797        rvecs_mat.release();
4798        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4799        tvecs_mat.release();
4800        return retVal;
4801    }
4802
4803    /**
4804     * Calibrates a stereo camera set up. This function finds the intrinsic parameters
4805     * for each of the two cameras and the extrinsic parameters between the two cameras.
4806     *
4807     * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
4808     * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
4809     * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
4810     * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
4811     * be equal for each i.
4812     * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
4813     * observed by the first camera. The same structure as in REF: calibrateCamera.
4814     * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
4815     * observed by the second camera. The same structure as in REF: calibrateCamera.
4816     * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
4817     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
4818     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
4819     * REF: calibrateCamera.
4820     * @param cameraMatrix2 Input/output second camera intrinsic matrix for the second camera. See description for
4821     * cameraMatrix1.
4822     * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
4823     * description for distCoeffs1.
4824     * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
4825     * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
4826     * points given in the first camera's coordinate system to points in the second camera's
4827     * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
4828     * from the first camera's coordinate system to the second camera's coordinate system. Due to its
4829     * duality, this tuple is equivalent to the position of the first camera with respect to the
4830     * second camera coordinate system.
4831     * @param T Output translation vector, see description above.
4832     * @param E Output essential matrix.
4833     * @param F Output fundamental matrix.
4834     * @param rvecs Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
4835     * coordinate system of the first camera of the stereo pair (e.g. std::vector&lt;cv::Mat&gt;). More in detail, each
4836     * i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
4837     * description) brings the calibration pattern from the object coordinate space (in which object points are
4838     * specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
4839     * the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
4840     * to camera coordinate space of the first camera of the stereo pair.
4841     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
4842     * of previous output parameter ( rvecs ).
4843     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4844     * @param flags Different flags that may be zero or a combination of the following values:
4845     * <ul>
4846     *   <li>
4847     *    REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
4848     * matrices are estimated.
4849     *   </li>
4850     *   <li>
4851     *    REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
4852     * according to the specified flags. Initial values are provided by the user.
4853     *   </li>
4854     *   <li>
4855     *    REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
4856     * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
4857     *   </li>
4858     *   <li>
4859     *    REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
4860     *   </li>
4861     *   <li>
4862     *    REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
4863     *   </li>
4864     *   <li>
4865     *    REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
4866     * .
4867     *   </li>
4868     *   <li>
4869     *    REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
4870     *   </li>
4871     *   <li>
4872     *    REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
4873     * zeros and fix there.
4874     *   </li>
4875     *   <li>
4876     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
4877     * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
4878     * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4879     *   </li>
4880     *   <li>
4881     *    REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
4882     * compatibility, this extra flag should be explicitly specified to make the calibration
4883     * function use the rational model and return 8 coefficients. If the flag is not set, the
4884     * function computes and returns only 5 distortion coefficients.
4885     *   </li>
4886     *   <li>
4887     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
4888     * backward compatibility, this extra flag should be explicitly specified to make the
4889     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
4890     * set, the function computes and returns only 5 distortion coefficients.
4891     *   </li>
4892     *   <li>
4893     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
4894     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4895     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4896     *   </li>
4897     *   <li>
4898     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
4899     * backward compatibility, this extra flag should be explicitly specified to make the
4900     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
4901     * set, the function computes and returns only 5 distortion coefficients.
4902     *   </li>
4903     *   <li>
4904     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
4905     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4906     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4907     *   </li>
4908     * </ul>
4909     *
4910     * The function estimates the transformation between two cameras making a stereo pair. If one computes
4911     * the poses of an object relative to the first camera and to the second camera,
4912     * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
4913     * relative position and orientation between the two cameras are fixed, then those poses definitely
4914     * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
4915     * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
4916     * given. This is what the described function does. It computes (\(R\),\(T\)) such that:
4917     *
4918     * \(R_2=R R_1\)
4919     * \(T_2=R T_1 + T.\)
4920     *
4921     * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
4922     * coordinate system when given the point's coordinate representation in the first camera's coordinate
4923     * system:
4924     *
4925     * \(\begin{bmatrix}
4926     * X_2 \\
4927     * Y_2 \\
4928     * Z_2 \\
4929     * 1
4930     * \end{bmatrix} = \begin{bmatrix}
4931     * R &amp; T \\
4932     * 0 &amp; 1
4933     * \end{bmatrix} \begin{bmatrix}
4934     * X_1 \\
4935     * Y_1 \\
4936     * Z_1 \\
4937     * 1
4938     * \end{bmatrix}.\)
4939     *
4940     *
4941     * Optionally, it computes the essential matrix E:
4942     *
4943     * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
4944     *
4945     * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
4946     * And the function can also compute the fundamental matrix F:
4947     *
4948     * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
4949     *
4950     * Besides the stereo-related information, the function can also perform a full calibration of each of
4951     * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
4952     * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
4953     * estimated with high accuracy for each of the cameras individually (for example, using
4954     * #calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
4955     * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
4956     * estimated at once, it makes sense to restrict some parameters, for example, pass
4957     *  REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
4958     * reasonable assumption.
4959     *
4960     * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
4961     * points in all the available views from both cameras. The function returns the final value of the
4962     * re-projection error.
4963     * @return automatically generated
4964     */
4965    public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, List<Mat> rvecs, List<Mat> tvecs, Mat perViewErrors, int flags) {
4966        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4967        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
4968        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
4969        Mat rvecs_mat = new Mat();
4970        Mat tvecs_mat = new Mat();
4971        double retVal = stereoCalibrateExtended_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, perViewErrors.nativeObj, flags);
4972        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4973        rvecs_mat.release();
4974        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4975        tvecs_mat.release();
4976        return retVal;
4977    }
4978
4979    /**
4980     * Calibrates a stereo camera set up. This function finds the intrinsic parameters
4981     * for each of the two cameras and the extrinsic parameters between the two cameras.
4982     *
4983     * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
4984     * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
4985     * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
4986     * equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
4987     * be equal for each i.
4988     * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
4989     * observed by the first camera. The same structure as in REF: calibrateCamera.
4990     * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
4991     * observed by the second camera. The same structure as in REF: calibrateCamera.
4992     * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
4993     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
4994     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
4995     * REF: calibrateCamera.
4996     * @param cameraMatrix2 Input/output second camera intrinsic matrix for the second camera. See description for
4997     * cameraMatrix1.
4998     * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
4999     * description for distCoeffs1.
5000     * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
5001     * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
5002     * points given in the first camera's coordinate system to points in the second camera's
5003     * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
5004     * from the first camera's coordinate system to the second camera's coordinate system. Due to its
5005     * duality, this tuple is equivalent to the position of the first camera with respect to the
5006     * second camera coordinate system.
5007     * @param T Output translation vector, see description above.
5008     * @param E Output essential matrix.
5009     * @param F Output fundamental matrix.
5010     * @param rvecs Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
5011     * coordinate system of the first camera of the stereo pair (e.g. std::vector&lt;cv::Mat&gt;). More in detail, each
5012     * i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
5013     * description) brings the calibration pattern from the object coordinate space (in which object points are
5014     * specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
5015     * the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
5016     * to camera coordinate space of the first camera of the stereo pair.
5017     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
5018     * of previous output parameter ( rvecs ).
5019     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
5020     * <ul>
5021     *   <li>
5022     *    REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
5023     * matrices are estimated.
5024     *   </li>
5025     *   <li>
5026     *    REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
5027     * according to the specified flags. Initial values are provided by the user.
5028     *   </li>
5029     *   <li>
5030     *    REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
5031     * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
5032     *   </li>
5033     *   <li>
5034     *    REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
5035     *   </li>
5036     *   <li>
5037     *    REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
5038     *   </li>
5039     *   <li>
5040     *    REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
5041     * .
5042     *   </li>
5043     *   <li>
5044     *    REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
5045     *   </li>
5046     *   <li>
5047     *    REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
5048     * zeros and fix there.
5049     *   </li>
5050     *   <li>
5051     *    REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
5052     * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
5053     * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5054     *   </li>
5055     *   <li>
5056     *    REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
5057     * compatibility, this extra flag should be explicitly specified to make the calibration
5058     * function use the rational model and return 8 coefficients. If the flag is not set, the
5059     * function computes and returns only 5 distortion coefficients.
5060     *   </li>
5061     *   <li>
5062     *    REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
5063     * backward compatibility, this extra flag should be explicitly specified to make the
5064     * calibration function use the thin prism model and return 12 coefficients. If the flag is not
5065     * set, the function computes and returns only 5 distortion coefficients.
5066     *   </li>
5067     *   <li>
5068     *    REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
5069     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5070     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5071     *   </li>
5072     *   <li>
5073     *    REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
5074     * backward compatibility, this extra flag should be explicitly specified to make the
5075     * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
5076     * set, the function computes and returns only 5 distortion coefficients.
5077     *   </li>
5078     *   <li>
5079     *    REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
5080     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
5081     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5082     *   </li>
5083     * </ul>
5084     *
5085     * The function estimates the transformation between two cameras making a stereo pair. If one computes
5086     * the poses of an object relative to the first camera and to the second camera,
5087     * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
5088     * relative position and orientation between the two cameras are fixed, then those poses definitely
5089     * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
5090     * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
5091     * given. This is what the described function does. It computes (\(R\),\(T\)) such that:
5092     *
5093     * \(R_2=R R_1\)
5094     * \(T_2=R T_1 + T.\)
5095     *
5096     * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
5097     * coordinate system when given the point's coordinate representation in the first camera's coordinate
5098     * system:
5099     *
5100     * \(\begin{bmatrix}
5101     * X_2 \\
5102     * Y_2 \\
5103     * Z_2 \\
5104     * 1
5105     * \end{bmatrix} = \begin{bmatrix}
5106     * R &amp; T \\
5107     * 0 &amp; 1
5108     * \end{bmatrix} \begin{bmatrix}
5109     * X_1 \\
5110     * Y_1 \\
5111     * Z_1 \\
5112     * 1
5113     * \end{bmatrix}.\)
5114     *
5115     *
5116     * Optionally, it computes the essential matrix E:
5117     *
5118     * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
5119     *
5120     * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
5121     * And the function can also compute the fundamental matrix F:
5122     *
5123     * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
5124     *
5125     * Besides the stereo-related information, the function can also perform a full calibration of each of
5126     * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
5127     * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
5128     * estimated with high accuracy for each of the cameras individually (for example, using
5129     * #calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
5130     * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
5131     * estimated at once, it makes sense to restrict some parameters, for example, pass
5132     *  REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
5133     * reasonable assumption.
5134     *
5135     * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
5136     * points in all the available views from both cameras. The function returns the final value of the
5137     * re-projection error.
5138     * @return automatically generated
5139     */
5140    public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, List<Mat> rvecs, List<Mat> tvecs, Mat perViewErrors) {
5141        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5142        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5143        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5144        Mat rvecs_mat = new Mat();
5145        Mat tvecs_mat = new Mat();
5146        double retVal = stereoCalibrateExtended_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, perViewErrors.nativeObj);
5147        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
5148        rvecs_mat.release();
5149        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
5150        tvecs_mat.release();
5151        return retVal;
5152    }
5153
5154
5155    //
5156    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
5157    //
5158
5159    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags, TermCriteria criteria) {
5160        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5161        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5162        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5163        return stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5164    }
5165
5166    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags) {
5167        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5168        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5169        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5170        return stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags);
5171    }
5172
5173    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F) {
5174        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5175        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5176        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5177        return stereoCalibrate_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj);
5178    }
5179
5180
5181    //
5182    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
5183    //
5184
5185    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags, TermCriteria criteria) {
5186        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5187        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5188        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5189        return stereoCalibrate_3(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5190    }
5191
5192    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags) {
5193        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5194        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5195        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5196        return stereoCalibrate_4(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj, flags);
5197    }
5198
5199    public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors) {
5200        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5201        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5202        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5203        return stereoCalibrate_5(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj);
5204    }
5205
5206
5207    //
5208    // C++:  void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0)
5209    //
5210
5211    /**
5212     * Computes rectification transforms for each head of a calibrated stereo camera.
5213     *
5214     * @param cameraMatrix1 First camera intrinsic matrix.
5215     * @param distCoeffs1 First camera distortion parameters.
5216     * @param cameraMatrix2 Second camera intrinsic matrix.
5217     * @param distCoeffs2 Second camera distortion parameters.
5218     * @param imageSize Size of the image used for stereo calibration.
5219     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
5220     * see REF: stereoCalibrate.
5221     * @param T Translation vector from the coordinate system of the first camera to the second camera,
5222     * see REF: stereoCalibrate.
5223     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
5224     * brings points given in the unrectified first camera's coordinate system to points in the rectified
5225     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
5226     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5227     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5228     * brings points given in the unrectified second camera's coordinate system to points in the rectified
5229     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5230     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5231     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5232     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5233     * rectified first camera's image.
5234     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5235     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5236     * rectified second camera's image.
5237     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5238     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5239     * the function makes the principal points of each camera have the same pixel coordinates in the
5240     * rectified views. And if the flag is not set, the function may still shift the images in the
5241     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5242     * useful image area.
5243     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5244     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5245     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5246     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5247     * pixels from the original images from the cameras are retained in the rectified images (no source
5248     * image pixels are lost). Any intermediate value yields an intermediate result between
5249     * those two extreme cases.
5250     * @param newImageSize New image resolution after rectification. The same size should be passed to
5251     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5252     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5253     * preserve details in the original image, especially when there is a big radial distortion.
5254     * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
5255     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5256     * (see the picture below).
5257     * @param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
5258     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5259     * (see the picture below).
5260     *
5261     * The function computes the rotation matrices for each camera that (virtually) make both camera image
5262     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5263     * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5264     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5265     * coordinates. The function distinguishes the following two cases:
5266     *
5267     * <ul>
5268     *   <li>
5269     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5270     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5271     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
5272     *     y-coordinate. P1 and P2 look like:
5273     *   </li>
5274     * </ul>
5275     *
5276     *     \(\texttt{P1} = \begin{bmatrix}
5277     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
5278     *                         0 &amp; f &amp; cy &amp; 0 \\
5279     *                         0 &amp; 0 &amp; 1 &amp; 0
5280     *                      \end{bmatrix}\)
5281     *
5282     *     \(\texttt{P2} = \begin{bmatrix}
5283     *                         f &amp; 0 &amp; cx_2 &amp; T_x \cdot f \\
5284     *                         0 &amp; f &amp; cy &amp; 0 \\
5285     *                         0 &amp; 0 &amp; 1 &amp; 0
5286     *                      \end{bmatrix} ,\)
5287     *
5288     *     \(\texttt{Q} = \begin{bmatrix}
5289     *                         1 &amp; 0 &amp; 0 &amp; -cx_1 \\
5290     *                         0 &amp; 1 &amp; 0 &amp; -cy \\
5291     *                         0 &amp; 0 &amp; 0 &amp; f \\
5292     *                         0 &amp; 0 &amp; -\frac{1}{T_x} &amp; \frac{cx_1 - cx_2}{T_x}
5293     *                     \end{bmatrix} \)
5294     *
5295     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5296     *     REF: CALIB_ZERO_DISPARITY is set.
5297     *
5298     * <ul>
5299     *   <li>
5300     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5301     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5302     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
5303     *   </li>
5304     * </ul>
5305     *
5306     *     \(\texttt{P1} = \begin{bmatrix}
5307     *                         f &amp; 0 &amp; cx &amp; 0 \\
5308     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
5309     *                         0 &amp; 0 &amp; 1 &amp; 0
5310     *                      \end{bmatrix}\)
5311     *
5312     *     \(\texttt{P2} = \begin{bmatrix}
5313     *                         f &amp; 0 &amp; cx &amp; 0 \\
5314     *                         0 &amp; f &amp; cy_2 &amp; T_y \cdot f \\
5315     *                         0 &amp; 0 &amp; 1 &amp; 0
5316     *                      \end{bmatrix},\)
5317     *
5318     *     \(\texttt{Q} = \begin{bmatrix}
5319     *                         1 &amp; 0 &amp; 0 &amp; -cx \\
5320     *                         0 &amp; 1 &amp; 0 &amp; -cy_1 \\
5321     *                         0 &amp; 0 &amp; 0 &amp; f \\
5322     *                         0 &amp; 0 &amp; -\frac{1}{T_y} &amp; \frac{cy_1 - cy_2}{T_y}
5323     *                     \end{bmatrix} \)
5324     *
5325     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
5326     *     REF: CALIB_ZERO_DISPARITY is set.
5327     *
5328     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
5329     * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
5330     * initialize the rectification map for each camera.
5331     *
5332     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
5333     * the corresponding image regions. This means that the images are well rectified, which is what most
5334     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
5335     * their interiors are all valid pixels.
5336     *
5337     * ![image](pics/stereo_undistort.jpg)
5338     */
5339    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2) {
5340        double[] validPixROI1_out = new double[4];
5341        double[] validPixROI2_out = new double[4];
5342        stereoRectify_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out, validPixROI2_out);
5343        if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 
5344        if(validPixROI2!=null){ validPixROI2.x = (int)validPixROI2_out[0]; validPixROI2.y = (int)validPixROI2_out[1]; validPixROI2.width = (int)validPixROI2_out[2]; validPixROI2.height = (int)validPixROI2_out[3]; } 
5345    }
5346
5347    /**
5348     * Computes rectification transforms for each head of a calibrated stereo camera.
5349     *
5350     * @param cameraMatrix1 First camera intrinsic matrix.
5351     * @param distCoeffs1 First camera distortion parameters.
5352     * @param cameraMatrix2 Second camera intrinsic matrix.
5353     * @param distCoeffs2 Second camera distortion parameters.
5354     * @param imageSize Size of the image used for stereo calibration.
5355     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
5356     * see REF: stereoCalibrate.
5357     * @param T Translation vector from the coordinate system of the first camera to the second camera,
5358     * see REF: stereoCalibrate.
5359     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
5360     * brings points given in the unrectified first camera's coordinate system to points in the rectified
5361     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
5362     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5363     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5364     * brings points given in the unrectified second camera's coordinate system to points in the rectified
5365     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5366     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5367     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5368     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5369     * rectified first camera's image.
5370     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5371     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5372     * rectified second camera's image.
5373     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5374     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5375     * the function makes the principal points of each camera have the same pixel coordinates in the
5376     * rectified views. And if the flag is not set, the function may still shift the images in the
5377     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5378     * useful image area.
5379     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5380     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5381     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5382     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5383     * pixels from the original images from the cameras are retained in the rectified images (no source
5384     * image pixels are lost). Any intermediate value yields an intermediate result between
5385     * those two extreme cases.
5386     * @param newImageSize New image resolution after rectification. The same size should be passed to
5387     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5388     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5389     * preserve details in the original image, especially when there is a big radial distortion.
5390     * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
5391     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5392     * (see the picture below).
5393     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5394     * (see the picture below).
5395     *
5396     * The function computes the rotation matrices for each camera that (virtually) make both camera image
5397     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5398     * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5399     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5400     * coordinates. The function distinguishes the following two cases:
5401     *
5402     * <ul>
5403     *   <li>
5404     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5405     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5406     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
5407     *     y-coordinate. P1 and P2 look like:
5408     *   </li>
5409     * </ul>
5410     *
5411     *     \(\texttt{P1} = \begin{bmatrix}
5412     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
5413     *                         0 &amp; f &amp; cy &amp; 0 \\
5414     *                         0 &amp; 0 &amp; 1 &amp; 0
5415     *                      \end{bmatrix}\)
5416     *
5417     *     \(\texttt{P2} = \begin{bmatrix}
5418     *                         f &amp; 0 &amp; cx_2 &amp; T_x \cdot f \\
5419     *                         0 &amp; f &amp; cy &amp; 0 \\
5420     *                         0 &amp; 0 &amp; 1 &amp; 0
5421     *                      \end{bmatrix} ,\)
5422     *
5423     *     \(\texttt{Q} = \begin{bmatrix}
5424     *                         1 &amp; 0 &amp; 0 &amp; -cx_1 \\
5425     *                         0 &amp; 1 &amp; 0 &amp; -cy \\
5426     *                         0 &amp; 0 &amp; 0 &amp; f \\
5427     *                         0 &amp; 0 &amp; -\frac{1}{T_x} &amp; \frac{cx_1 - cx_2}{T_x}
5428     *                     \end{bmatrix} \)
5429     *
5430     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5431     *     REF: CALIB_ZERO_DISPARITY is set.
5432     *
5433     * <ul>
5434     *   <li>
5435     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5436     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5437     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
5438     *   </li>
5439     * </ul>
5440     *
5441     *     \(\texttt{P1} = \begin{bmatrix}
5442     *                         f &amp; 0 &amp; cx &amp; 0 \\
5443     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
5444     *                         0 &amp; 0 &amp; 1 &amp; 0
5445     *                      \end{bmatrix}\)
5446     *
5447     *     \(\texttt{P2} = \begin{bmatrix}
5448     *                         f &amp; 0 &amp; cx &amp; 0 \\
5449     *                         0 &amp; f &amp; cy_2 &amp; T_y \cdot f \\
5450     *                         0 &amp; 0 &amp; 1 &amp; 0
5451     *                      \end{bmatrix},\)
5452     *
5453     *     \(\texttt{Q} = \begin{bmatrix}
5454     *                         1 &amp; 0 &amp; 0 &amp; -cx \\
5455     *                         0 &amp; 1 &amp; 0 &amp; -cy_1 \\
5456     *                         0 &amp; 0 &amp; 0 &amp; f \\
5457     *                         0 &amp; 0 &amp; -\frac{1}{T_y} &amp; \frac{cy_1 - cy_2}{T_y}
5458     *                     \end{bmatrix} \)
5459     *
5460     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
5461     *     REF: CALIB_ZERO_DISPARITY is set.
5462     *
5463     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
5464     * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
5465     * initialize the rectification map for each camera.
5466     *
5467     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
5468     * the corresponding image regions. This means that the images are well rectified, which is what most
5469     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
5470     * their interiors are all valid pixels.
5471     *
5472     * ![image](pics/stereo_undistort.jpg)
5473     */
5474    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1) {
5475        double[] validPixROI1_out = new double[4];
5476        stereoRectify_1(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out);
5477        if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 
5478    }
5479
5480    /**
5481     * Computes rectification transforms for each head of a calibrated stereo camera.
5482     *
5483     * @param cameraMatrix1 First camera intrinsic matrix.
5484     * @param distCoeffs1 First camera distortion parameters.
5485     * @param cameraMatrix2 Second camera intrinsic matrix.
5486     * @param distCoeffs2 Second camera distortion parameters.
5487     * @param imageSize Size of the image used for stereo calibration.
5488     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
5489     * see REF: stereoCalibrate.
5490     * @param T Translation vector from the coordinate system of the first camera to the second camera,
5491     * see REF: stereoCalibrate.
5492     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
5493     * brings points given in the unrectified first camera's coordinate system to points in the rectified
5494     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
5495     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5496     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5497     * brings points given in the unrectified second camera's coordinate system to points in the rectified
5498     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5499     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5500     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5501     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5502     * rectified first camera's image.
5503     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5504     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5505     * rectified second camera's image.
5506     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5507     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5508     * the function makes the principal points of each camera have the same pixel coordinates in the
5509     * rectified views. And if the flag is not set, the function may still shift the images in the
5510     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5511     * useful image area.
5512     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5513     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5514     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5515     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5516     * pixels from the original images from the cameras are retained in the rectified images (no source
5517     * image pixels are lost). Any intermediate value yields an intermediate result between
5518     * those two extreme cases.
5519     * @param newImageSize New image resolution after rectification. The same size should be passed to
5520     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5521     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5522     * preserve details in the original image, especially when there is a big radial distortion.
5523     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5524     * (see the picture below).
5525     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5526     * (see the picture below).
5527     *
5528     * The function computes the rotation matrices for each camera that (virtually) make both camera image
5529     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5530     * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5531     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5532     * coordinates. The function distinguishes the following two cases:
5533     *
5534     * <ul>
5535     *   <li>
5536     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5537     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5538     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
5539     *     y-coordinate. P1 and P2 look like:
5540     *   </li>
5541     * </ul>
5542     *
5543     *     \(\texttt{P1} = \begin{bmatrix}
5544     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
5545     *                         0 &amp; f &amp; cy &amp; 0 \\
5546     *                         0 &amp; 0 &amp; 1 &amp; 0
5547     *                      \end{bmatrix}\)
5548     *
5549     *     \(\texttt{P2} = \begin{bmatrix}
5550     *                         f &amp; 0 &amp; cx_2 &amp; T_x \cdot f \\
5551     *                         0 &amp; f &amp; cy &amp; 0 \\
5552     *                         0 &amp; 0 &amp; 1 &amp; 0
5553     *                      \end{bmatrix} ,\)
5554     *
5555     *     \(\texttt{Q} = \begin{bmatrix}
5556     *                         1 &amp; 0 &amp; 0 &amp; -cx_1 \\
5557     *                         0 &amp; 1 &amp; 0 &amp; -cy \\
5558     *                         0 &amp; 0 &amp; 0 &amp; f \\
5559     *                         0 &amp; 0 &amp; -\frac{1}{T_x} &amp; \frac{cx_1 - cx_2}{T_x}
5560     *                     \end{bmatrix} \)
5561     *
5562     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5563     *     REF: CALIB_ZERO_DISPARITY is set.
5564     *
5565     * <ul>
5566     *   <li>
5567     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5568     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5569     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
5570     *   </li>
5571     * </ul>
5572     *
5573     *     \(\texttt{P1} = \begin{bmatrix}
5574     *                         f &amp; 0 &amp; cx &amp; 0 \\
5575     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
5576     *                         0 &amp; 0 &amp; 1 &amp; 0
5577     *                      \end{bmatrix}\)
5578     *
5579     *     \(\texttt{P2} = \begin{bmatrix}
5580     *                         f &amp; 0 &amp; cx &amp; 0 \\
5581     *                         0 &amp; f &amp; cy_2 &amp; T_y \cdot f \\
5582     *                         0 &amp; 0 &amp; 1 &amp; 0
5583     *                      \end{bmatrix},\)
5584     *
5585     *     \(\texttt{Q} = \begin{bmatrix}
5586     *                         1 &amp; 0 &amp; 0 &amp; -cx \\
5587     *                         0 &amp; 1 &amp; 0 &amp; -cy_1 \\
5588     *                         0 &amp; 0 &amp; 0 &amp; f \\
5589     *                         0 &amp; 0 &amp; -\frac{1}{T_y} &amp; \frac{cy_1 - cy_2}{T_y}
5590     *                     \end{bmatrix} \)
5591     *
5592     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
5593     *     REF: CALIB_ZERO_DISPARITY is set.
5594     *
5595     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
5596     * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
5597     * initialize the rectification map for each camera.
5598     *
5599     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
5600     * the corresponding image regions. This means that the images are well rectified, which is what most
5601     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
5602     * their interiors are all valid pixels.
5603     *
5604     * ![image](pics/stereo_undistort.jpg)
5605     */
5606    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize) {
5607        stereoRectify_2(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height);
5608    }
5609
5610    /**
5611     * Computes rectification transforms for each head of a calibrated stereo camera.
5612     *
5613     * @param cameraMatrix1 First camera intrinsic matrix.
5614     * @param distCoeffs1 First camera distortion parameters.
5615     * @param cameraMatrix2 Second camera intrinsic matrix.
5616     * @param distCoeffs2 Second camera distortion parameters.
5617     * @param imageSize Size of the image used for stereo calibration.
5618     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
5619     * see REF: stereoCalibrate.
5620     * @param T Translation vector from the coordinate system of the first camera to the second camera,
5621     * see REF: stereoCalibrate.
5622     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
5623     * brings points given in the unrectified first camera's coordinate system to points in the rectified
5624     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
5625     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5626     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5627     * brings points given in the unrectified second camera's coordinate system to points in the rectified
5628     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5629     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5630     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5631     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5632     * rectified first camera's image.
5633     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5634     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5635     * rectified second camera's image.
5636     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5637     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5638     * the function makes the principal points of each camera have the same pixel coordinates in the
5639     * rectified views. And if the flag is not set, the function may still shift the images in the
5640     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5641     * useful image area.
5642     * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5643     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5644     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5645     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5646     * pixels from the original images from the cameras are retained in the rectified images (no source
5647     * image pixels are lost). Any intermediate value yields an intermediate result between
5648     * those two extreme cases.
5649     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5650     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5651     * preserve details in the original image, especially when there is a big radial distortion.
5652     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5653     * (see the picture below).
5654     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5655     * (see the picture below).
5656     *
5657     * The function computes the rotation matrices for each camera that (virtually) make both camera image
5658     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5659     * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5660     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5661     * coordinates. The function distinguishes the following two cases:
5662     *
5663     * <ul>
5664     *   <li>
5665     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5666     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5667     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
5668     *     y-coordinate. P1 and P2 look like:
5669     *   </li>
5670     * </ul>
5671     *
5672     *     \(\texttt{P1} = \begin{bmatrix}
5673     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
5674     *                         0 &amp; f &amp; cy &amp; 0 \\
5675     *                         0 &amp; 0 &amp; 1 &amp; 0
5676     *                      \end{bmatrix}\)
5677     *
5678     *     \(\texttt{P2} = \begin{bmatrix}
5679     *                         f &amp; 0 &amp; cx_2 &amp; T_x \cdot f \\
5680     *                         0 &amp; f &amp; cy &amp; 0 \\
5681     *                         0 &amp; 0 &amp; 1 &amp; 0
5682     *                      \end{bmatrix} ,\)
5683     *
5684     *     \(\texttt{Q} = \begin{bmatrix}
5685     *                         1 &amp; 0 &amp; 0 &amp; -cx_1 \\
5686     *                         0 &amp; 1 &amp; 0 &amp; -cy \\
5687     *                         0 &amp; 0 &amp; 0 &amp; f \\
5688     *                         0 &amp; 0 &amp; -\frac{1}{T_x} &amp; \frac{cx_1 - cx_2}{T_x}
5689     *                     \end{bmatrix} \)
5690     *
5691     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5692     *     REF: CALIB_ZERO_DISPARITY is set.
5693     *
5694     * <ul>
5695     *   <li>
5696     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5697     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5698     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
5699     *   </li>
5700     * </ul>
5701     *
5702     *     \(\texttt{P1} = \begin{bmatrix}
5703     *                         f &amp; 0 &amp; cx &amp; 0 \\
5704     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
5705     *                         0 &amp; 0 &amp; 1 &amp; 0
5706     *                      \end{bmatrix}\)
5707     *
5708     *     \(\texttt{P2} = \begin{bmatrix}
5709     *                         f &amp; 0 &amp; cx &amp; 0 \\
5710     *                         0 &amp; f &amp; cy_2 &amp; T_y \cdot f \\
5711     *                         0 &amp; 0 &amp; 1 &amp; 0
5712     *                      \end{bmatrix},\)
5713     *
5714     *     \(\texttt{Q} = \begin{bmatrix}
5715     *                         1 &amp; 0 &amp; 0 &amp; -cx \\
5716     *                         0 &amp; 1 &amp; 0 &amp; -cy_1 \\
5717     *                         0 &amp; 0 &amp; 0 &amp; f \\
5718     *                         0 &amp; 0 &amp; -\frac{1}{T_y} &amp; \frac{cy_1 - cy_2}{T_y}
5719     *                     \end{bmatrix} \)
5720     *
5721     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
5722     *     REF: CALIB_ZERO_DISPARITY is set.
5723     *
5724     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
5725     * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
5726     * initialize the rectification map for each camera.
5727     *
5728     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
5729     * the corresponding image regions. This means that the images are well rectified, which is what most
5730     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
5731     * their interiors are all valid pixels.
5732     *
5733     * ![image](pics/stereo_undistort.jpg)
5734     */
5735    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha) {
5736        stereoRectify_3(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha);
5737    }
5738
5739    /**
5740     * Computes rectification transforms for each head of a calibrated stereo camera.
5741     *
5742     * @param cameraMatrix1 First camera intrinsic matrix.
5743     * @param distCoeffs1 First camera distortion parameters.
5744     * @param cameraMatrix2 Second camera intrinsic matrix.
5745     * @param distCoeffs2 Second camera distortion parameters.
5746     * @param imageSize Size of the image used for stereo calibration.
5747     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
5748     * see REF: stereoCalibrate.
5749     * @param T Translation vector from the coordinate system of the first camera to the second camera,
5750     * see REF: stereoCalibrate.
5751     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
5752     * brings points given in the unrectified first camera's coordinate system to points in the rectified
5753     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
5754     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5755     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5756     * brings points given in the unrectified second camera's coordinate system to points in the rectified
5757     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5758     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5759     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5760     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5761     * rectified first camera's image.
5762     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5763     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5764     * rectified second camera's image.
5765     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5766     * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5767     * the function makes the principal points of each camera have the same pixel coordinates in the
5768     * rectified views. And if the flag is not set, the function may still shift the images in the
5769     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5770     * useful image area.
5771     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5772     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5773     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5774     * pixels from the original images from the cameras are retained in the rectified images (no source
5775     * image pixels are lost). Any intermediate value yields an intermediate result between
5776     * those two extreme cases.
5777     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5778     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5779     * preserve details in the original image, especially when there is a big radial distortion.
5780     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5781     * (see the picture below).
5782     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5783     * (see the picture below).
5784     *
5785     * The function computes the rotation matrices for each camera that (virtually) make both camera image
5786     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5787     * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5788     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5789     * coordinates. The function distinguishes the following two cases:
5790     *
5791     * <ul>
5792     *   <li>
5793     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5794     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5795     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
5796     *     y-coordinate. P1 and P2 look like:
5797     *   </li>
5798     * </ul>
5799     *
5800     *     \(\texttt{P1} = \begin{bmatrix}
5801     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
5802     *                         0 &amp; f &amp; cy &amp; 0 \\
5803     *                         0 &amp; 0 &amp; 1 &amp; 0
5804     *                      \end{bmatrix}\)
5805     *
5806     *     \(\texttt{P2} = \begin{bmatrix}
5807     *                         f &amp; 0 &amp; cx_2 &amp; T_x \cdot f \\
5808     *                         0 &amp; f &amp; cy &amp; 0 \\
5809     *                         0 &amp; 0 &amp; 1 &amp; 0
5810     *                      \end{bmatrix} ,\)
5811     *
5812     *     \(\texttt{Q} = \begin{bmatrix}
5813     *                         1 &amp; 0 &amp; 0 &amp; -cx_1 \\
5814     *                         0 &amp; 1 &amp; 0 &amp; -cy \\
5815     *                         0 &amp; 0 &amp; 0 &amp; f \\
5816     *                         0 &amp; 0 &amp; -\frac{1}{T_x} &amp; \frac{cx_1 - cx_2}{T_x}
5817     *                     \end{bmatrix} \)
5818     *
5819     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5820     *     REF: CALIB_ZERO_DISPARITY is set.
5821     *
5822     * <ul>
5823     *   <li>
5824     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5825     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5826     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
5827     *   </li>
5828     * </ul>
5829     *
5830     *     \(\texttt{P1} = \begin{bmatrix}
5831     *                         f &amp; 0 &amp; cx &amp; 0 \\
5832     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
5833     *                         0 &amp; 0 &amp; 1 &amp; 0
5834     *                      \end{bmatrix}\)
5835     *
5836     *     \(\texttt{P2} = \begin{bmatrix}
5837     *                         f &amp; 0 &amp; cx &amp; 0 \\
5838     *                         0 &amp; f &amp; cy_2 &amp; T_y \cdot f \\
5839     *                         0 &amp; 0 &amp; 1 &amp; 0
5840     *                      \end{bmatrix},\)
5841     *
5842     *     \(\texttt{Q} = \begin{bmatrix}
5843     *                         1 &amp; 0 &amp; 0 &amp; -cx \\
5844     *                         0 &amp; 1 &amp; 0 &amp; -cy_1 \\
5845     *                         0 &amp; 0 &amp; 0 &amp; f \\
5846     *                         0 &amp; 0 &amp; -\frac{1}{T_y} &amp; \frac{cy_1 - cy_2}{T_y}
5847     *                     \end{bmatrix} \)
5848     *
5849     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
5850     *     REF: CALIB_ZERO_DISPARITY is set.
5851     *
5852     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
5853     * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
5854     * initialize the rectification map for each camera.
5855     *
5856     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
5857     * the corresponding image regions. This means that the images are well rectified, which is what most
5858     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
5859     * their interiors are all valid pixels.
5860     *
5861     * ![image](pics/stereo_undistort.jpg)
5862     */
5863    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags) {
5864        stereoRectify_4(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags);
5865    }
5866
5867    /**
5868     * Computes rectification transforms for each head of a calibrated stereo camera.
5869     *
5870     * @param cameraMatrix1 First camera intrinsic matrix.
5871     * @param distCoeffs1 First camera distortion parameters.
5872     * @param cameraMatrix2 Second camera intrinsic matrix.
5873     * @param distCoeffs2 Second camera distortion parameters.
5874     * @param imageSize Size of the image used for stereo calibration.
5875     * @param R Rotation matrix from the coordinate system of the first camera to the second camera,
5876     * see REF: stereoCalibrate.
5877     * @param T Translation vector from the coordinate system of the first camera to the second camera,
5878     * see REF: stereoCalibrate.
5879     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
5880     * brings points given in the unrectified first camera's coordinate system to points in the rectified
5881     * first camera's coordinate system. In more technical terms, it performs a change of basis from the
5882     * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5883     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5884     * brings points given in the unrectified second camera's coordinate system to points in the rectified
5885     * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5886     * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5887     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5888     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5889     * rectified first camera's image.
5890     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5891     * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5892     * rectified second camera's image.
5893     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5894     * the function makes the principal points of each camera have the same pixel coordinates in the
5895     * rectified views. And if the flag is not set, the function may still shift the images in the
5896     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5897     * useful image area.
5898     * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5899     * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5900     * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5901     * pixels from the original images from the cameras are retained in the rectified images (no source
5902     * image pixels are lost). Any intermediate value yields an intermediate result between
5903     * those two extreme cases.
5904     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5905     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5906     * preserve details in the original image, especially when there is a big radial distortion.
5907     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5908     * (see the picture below).
5909     * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5910     * (see the picture below).
5911     *
5912     * The function computes the rotation matrices for each camera that (virtually) make both camera image
5913     * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5914     * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5915     * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5916     * coordinates. The function distinguishes the following two cases:
5917     *
5918     * <ul>
5919     *   <li>
5920     *    <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5921     *     mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5922     *     corresponding epipolar lines in the left and right cameras are horizontal and have the same
5923     *     y-coordinate. P1 and P2 look like:
5924     *   </li>
5925     * </ul>
5926     *
5927     *     \(\texttt{P1} = \begin{bmatrix}
5928     *                         f &amp; 0 &amp; cx_1 &amp; 0 \\
5929     *                         0 &amp; f &amp; cy &amp; 0 \\
5930     *                         0 &amp; 0 &amp; 1 &amp; 0
5931     *                      \end{bmatrix}\)
5932     *
5933     *     \(\texttt{P2} = \begin{bmatrix}
5934     *                         f &amp; 0 &amp; cx_2 &amp; T_x \cdot f \\
5935     *                         0 &amp; f &amp; cy &amp; 0 \\
5936     *                         0 &amp; 0 &amp; 1 &amp; 0
5937     *                      \end{bmatrix} ,\)
5938     *
5939     *     \(\texttt{Q} = \begin{bmatrix}
5940     *                         1 &amp; 0 &amp; 0 &amp; -cx_1 \\
5941     *                         0 &amp; 1 &amp; 0 &amp; -cy \\
5942     *                         0 &amp; 0 &amp; 0 &amp; f \\
5943     *                         0 &amp; 0 &amp; -\frac{1}{T_x} &amp; \frac{cx_1 - cx_2}{T_x}
5944     *                     \end{bmatrix} \)
5945     *
5946     *     where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5947     *     REF: CALIB_ZERO_DISPARITY is set.
5948     *
5949     * <ul>
5950     *   <li>
5951     *    <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5952     *     mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5953     *     lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
5954     *   </li>
5955     * </ul>
5956     *
5957     *     \(\texttt{P1} = \begin{bmatrix}
5958     *                         f &amp; 0 &amp; cx &amp; 0 \\
5959     *                         0 &amp; f &amp; cy_1 &amp; 0 \\
5960     *                         0 &amp; 0 &amp; 1 &amp; 0
5961     *                      \end{bmatrix}\)
5962     *
5963     *     \(\texttt{P2} = \begin{bmatrix}
5964     *                         f &amp; 0 &amp; cx &amp; 0 \\
5965     *                         0 &amp; f &amp; cy_2 &amp; T_y \cdot f \\
5966     *                         0 &amp; 0 &amp; 1 &amp; 0
5967     *                      \end{bmatrix},\)
5968     *
5969     *     \(\texttt{Q} = \begin{bmatrix}
5970     *                         1 &amp; 0 &amp; 0 &amp; -cx \\
5971     *                         0 &amp; 1 &amp; 0 &amp; -cy_1 \\
5972     *                         0 &amp; 0 &amp; 0 &amp; f \\
5973     *                         0 &amp; 0 &amp; -\frac{1}{T_y} &amp; \frac{cy_1 - cy_2}{T_y}
5974     *                     \end{bmatrix} \)
5975     *
5976     *     where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if
5977     *     REF: CALIB_ZERO_DISPARITY is set.
5978     *
5979     * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
5980     * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
5981     * initialize the rectification map for each camera.
5982     *
5983     * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
5984     * the corresponding image regions. This means that the images are well rectified, which is what most
5985     * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
5986     * their interiors are all valid pixels.
5987     *
5988     * ![image](pics/stereo_undistort.jpg)
5989     */
5990    public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q) {
5991        stereoRectify_5(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj);
5992    }
5993
5994
5995    //
5996    // C++:  bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5)
5997    //
5998
5999    /**
6000     * Computes a rectification transform for an uncalibrated stereo camera.
6001     *
6002     * @param points1 Array of feature points in the first image.
6003     * @param points2 The corresponding points in the second image. The same formats as in
6004     * #findFundamentalMat are supported.
6005     * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
6006     * #findFundamentalMat .
6007     * @param imgSize Size of the image.
6008     * @param H1 Output rectification homography matrix for the first image.
6009     * @param H2 Output rectification homography matrix for the second image.
6010     * @param threshold Optional threshold used to filter out the outliers. If the parameter is greater
6011     * than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
6012     * for which \(|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|&gt;\texttt{threshold}\) )
6013     * are rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
6014     *
6015     * The function computes the rectification transformations without knowing intrinsic parameters of the
6016     * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
6017     * related difference from #stereoRectify is that the function outputs not the rectification
6018     * transformations in the object (3D) space, but the planar perspective transformations encoded by the
6019     * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
6020     *
6021     * <b>Note:</b>
6022     *    While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
6023     *     depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
6024     *     it would be better to correct it before computing the fundamental matrix and calling this
6025     *     function. For example, distortion coefficients can be estimated for each head of stereo camera
6026     *     separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
6027     *     just the point coordinates can be corrected with #undistortPoints .
6028     * @return automatically generated
6029     */
6030    public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2, double threshold) {
6031        return stereoRectifyUncalibrated_0(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj, threshold);
6032    }
6033
6034    /**
6035     * Computes a rectification transform for an uncalibrated stereo camera.
6036     *
6037     * @param points1 Array of feature points in the first image.
6038     * @param points2 The corresponding points in the second image. The same formats as in
6039     * #findFundamentalMat are supported.
6040     * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
6041     * #findFundamentalMat .
6042     * @param imgSize Size of the image.
6043     * @param H1 Output rectification homography matrix for the first image.
6044     * @param H2 Output rectification homography matrix for the second image.
6045     * than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
6046     * for which \(|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|&gt;\texttt{threshold}\) )
6047     * are rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
6048     *
6049     * The function computes the rectification transformations without knowing intrinsic parameters of the
6050     * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
6051     * related difference from #stereoRectify is that the function outputs not the rectification
6052     * transformations in the object (3D) space, but the planar perspective transformations encoded by the
6053     * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
6054     *
6055     * <b>Note:</b>
6056     *    While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
6057     *     depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
6058     *     it would be better to correct it before computing the fundamental matrix and calling this
6059     *     function. For example, distortion coefficients can be estimated for each head of stereo camera
6060     *     separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
6061     *     just the point coordinates can be corrected with #undistortPoints .
6062     * @return automatically generated
6063     */
6064    public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2) {
6065        return stereoRectifyUncalibrated_1(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj);
6066    }
6067
6068
6069    //
6070    // C++:  float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags)
6071    //
6072
6073    public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List<Mat> imgpt1, List<Mat> imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags) {
6074        Mat imgpt1_mat = Converters.vector_Mat_to_Mat(imgpt1);
6075        Mat imgpt3_mat = Converters.vector_Mat_to_Mat(imgpt3);
6076        double[] roi1_out = new double[4];
6077        double[] roi2_out = new double[4];
6078        float retVal = rectify3Collinear_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, cameraMatrix3.nativeObj, distCoeffs3.nativeObj, imgpt1_mat.nativeObj, imgpt3_mat.nativeObj, imageSize.width, imageSize.height, R12.nativeObj, T12.nativeObj, R13.nativeObj, T13.nativeObj, R1.nativeObj, R2.nativeObj, R3.nativeObj, P1.nativeObj, P2.nativeObj, P3.nativeObj, Q.nativeObj, alpha, newImgSize.width, newImgSize.height, roi1_out, roi2_out, flags);
6079        if(roi1!=null){ roi1.x = (int)roi1_out[0]; roi1.y = (int)roi1_out[1]; roi1.width = (int)roi1_out[2]; roi1.height = (int)roi1_out[3]; } 
6080        if(roi2!=null){ roi2.x = (int)roi2_out[0]; roi2.y = (int)roi2_out[1]; roi2.width = (int)roi2_out[2]; roi2.height = (int)roi2_out[3]; } 
6081        return retVal;
6082    }
6083
6084
6085    //
6086    // C++:  Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false)
6087    //
6088
6089    /**
6090     * Returns the new camera intrinsic matrix based on the free scaling parameter.
6091     *
6092     * @param cameraMatrix Input camera intrinsic matrix.
6093     * @param distCoeffs Input vector of distortion coefficients
6094     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
6095     * assumed.
6096     * @param imageSize Original image size.
6097     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
6098     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
6099     * #stereoRectify for details.
6100     * @param newImgSize Image size after rectification. By default, it is set to imageSize .
6101     * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
6102     * undistorted image. See roi1, roi2 description in #stereoRectify .
6103     * @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the
6104     * principal point should be at the image center or not. By default, the principal point is chosen to
6105     * best fit a subset of the source image (determined by alpha) to the corrected image.
6106     * @return new_camera_matrix Output new camera intrinsic matrix.
6107     *
6108     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
6109     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
6110     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
6111     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
6112     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
6113     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
6114     * #initUndistortRectifyMap to produce the maps for #remap .
6115     */
6116    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint) {
6117        double[] validPixROI_out = new double[4];
6118        Mat retVal = new Mat(getOptimalNewCameraMatrix_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out, centerPrincipalPoint));
6119        if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } 
6120        return retVal;
6121    }
6122
6123    /**
6124     * Returns the new camera intrinsic matrix based on the free scaling parameter.
6125     *
6126     * @param cameraMatrix Input camera intrinsic matrix.
6127     * @param distCoeffs Input vector of distortion coefficients
6128     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
6129     * assumed.
6130     * @param imageSize Original image size.
6131     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
6132     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
6133     * #stereoRectify for details.
6134     * @param newImgSize Image size after rectification. By default, it is set to imageSize .
6135     * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
6136     * undistorted image. See roi1, roi2 description in #stereoRectify .
6137     * principal point should be at the image center or not. By default, the principal point is chosen to
6138     * best fit a subset of the source image (determined by alpha) to the corrected image.
6139     * @return new_camera_matrix Output new camera intrinsic matrix.
6140     *
6141     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
6142     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
6143     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
6144     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
6145     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
6146     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
6147     * #initUndistortRectifyMap to produce the maps for #remap .
6148     */
6149    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI) {
6150        double[] validPixROI_out = new double[4];
6151        Mat retVal = new Mat(getOptimalNewCameraMatrix_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out));
6152        if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } 
6153        return retVal;
6154    }
6155
6156    /**
6157     * Returns the new camera intrinsic matrix based on the free scaling parameter.
6158     *
6159     * @param cameraMatrix Input camera intrinsic matrix.
6160     * @param distCoeffs Input vector of distortion coefficients
6161     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
6162     * assumed.
6163     * @param imageSize Original image size.
6164     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
6165     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
6166     * #stereoRectify for details.
6167     * @param newImgSize Image size after rectification. By default, it is set to imageSize .
6168     * undistorted image. See roi1, roi2 description in #stereoRectify .
6169     * principal point should be at the image center or not. By default, the principal point is chosen to
6170     * best fit a subset of the source image (determined by alpha) to the corrected image.
6171     * @return new_camera_matrix Output new camera intrinsic matrix.
6172     *
6173     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
6174     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
6175     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
6176     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
6177     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
6178     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
6179     * #initUndistortRectifyMap to produce the maps for #remap .
6180     */
6181    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize) {
6182        return new Mat(getOptimalNewCameraMatrix_2(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height));
6183    }
6184
6185    /**
6186     * Returns the new camera intrinsic matrix based on the free scaling parameter.
6187     *
6188     * @param cameraMatrix Input camera intrinsic matrix.
6189     * @param distCoeffs Input vector of distortion coefficients
6190     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
6191     * assumed.
6192     * @param imageSize Original image size.
6193     * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
6194     * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
6195     * #stereoRectify for details.
6196     * undistorted image. See roi1, roi2 description in #stereoRectify .
6197     * principal point should be at the image center or not. By default, the principal point is chosen to
6198     * best fit a subset of the source image (determined by alpha) to the corrected image.
6199     * @return new_camera_matrix Output new camera intrinsic matrix.
6200     *
6201     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
6202     * By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original
6203     * image pixels if there is valuable information in the corners alpha=1 , or get something in between.
6204     * When alpha&gt;0 , the undistorted result is likely to have some black pixels corresponding to
6205     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
6206     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
6207     * #initUndistortRectifyMap to produce the maps for #remap .
6208     */
6209    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha) {
6210        return new Mat(getOptimalNewCameraMatrix_3(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha));
6211    }
6212
6213
6214    //
6215    // C++:  void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
6216    //
6217
6218    /**
6219     * Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
6220     *
6221     * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
6222     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
6223     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6224     * for all the transformations from gripper frame to robot base frame.
6225     * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
6226     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
6227     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6228     * from gripper frame to robot base frame.
6229     * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
6230     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
6231     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6232     * for all the transformations from calibration target frame to camera frame.
6233     * @param t_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
6234     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
6235     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6236     * from calibration target frame to camera frame.
6237     * @param R_cam2gripper Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
6238     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
6239     * @param t_cam2gripper Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
6240     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
6241     * @param method One of the implemented Hand-Eye calibration method, see cv::HandEyeCalibrationMethod
6242     *
6243     * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
6244     * rotation then the translation (separable solutions) and the following methods are implemented:
6245     * <ul>
6246     *   <li>
6247     *    R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/EyeCalibration \cite Tsai89
6248     *   </li>
6249     *   <li>
6250     *    F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
6251     *   </li>
6252     *   <li>
6253     *    R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
6254     *   </li>
6255     * </ul>
6256     *
6257     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
6258     * with the following implemented methods:
6259     * <ul>
6260     *   <li>
6261     *    N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
6262     *   </li>
6263     *   <li>
6264     *    K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
6265     *   </li>
6266     * </ul>
6267     *
6268     * The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
6269     * mounted on a robot gripper ("hand") has to be estimated. This configuration is called eye-in-hand.
6270     *
6271     * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
6272     * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
6273     * the suitable transformations to the function, see below.
6274     *
6275     * ![](pics/hand-eye_figure.png)
6276     *
6277     * The calibration procedure is the following:
6278     * <ul>
6279     *   <li>
6280     *    a static calibration pattern is used to estimate the transformation between the target frame
6281     *   and the camera frame
6282     *   </li>
6283     *   <li>
6284     *    the robot gripper is moved in order to acquire several poses
6285     *   </li>
6286     *   <li>
6287     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
6288     *   instance the robot kinematics
6289     * \(
6290     *     \begin{bmatrix}
6291     *     X_b\\
6292     *     Y_b\\
6293     *     Z_b\\
6294     *     1
6295     *     \end{bmatrix}
6296     *     =
6297     *     \begin{bmatrix}
6298     *     _{}^{b}\textrm{R}_g &amp; _{}^{b}\textrm{t}_g \\
6299     *     0_{1 \times 3} &amp; 1
6300     *     \end{bmatrix}
6301     *     \begin{bmatrix}
6302     *     X_g\\
6303     *     Y_g\\
6304     *     Z_g\\
6305     *     1
6306     *     \end{bmatrix}
6307     * \)
6308     *   </li>
6309     *   <li>
6310     *    for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
6311     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
6312     * \(
6313     *     \begin{bmatrix}
6314     *     X_c\\
6315     *     Y_c\\
6316     *     Z_c\\
6317     *     1
6318     *     \end{bmatrix}
6319     *     =
6320     *     \begin{bmatrix}
6321     *     _{}^{c}\textrm{R}_t &amp; _{}^{c}\textrm{t}_t \\
6322     *     0_{1 \times 3} &amp; 1
6323     *     \end{bmatrix}
6324     *     \begin{bmatrix}
6325     *     X_t\\
6326     *     Y_t\\
6327     *     Z_t\\
6328     *     1
6329     *     \end{bmatrix}
6330     * \)
6331     *   </li>
6332     * </ul>
6333     *
6334     * The Hand-Eye calibration procedure returns the following homogeneous transformation
6335     * \(
6336     *     \begin{bmatrix}
6337     *     X_g\\
6338     *     Y_g\\
6339     *     Z_g\\
6340     *     1
6341     *     \end{bmatrix}
6342     *     =
6343     *     \begin{bmatrix}
6344     *     _{}^{g}\textrm{R}_c &amp; _{}^{g}\textrm{t}_c \\
6345     *     0_{1 \times 3} &amp; 1
6346     *     \end{bmatrix}
6347     *     \begin{bmatrix}
6348     *     X_c\\
6349     *     Y_c\\
6350     *     Z_c\\
6351     *     1
6352     *     \end{bmatrix}
6353     * \)
6354     *
6355     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
6356     * <ul>
6357     *   <li>
6358     *    for an eye-in-hand configuration
6359     * \(
6360     *     \begin{align*}
6361     *     ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
6362     *     \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
6363     *   </li>
6364     * </ul>
6365     *
6366     *     (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &amp;=
6367     *     \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
6368     *
6369     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i \\
6370     *     \end{align*}
6371     * \)
6372     *
6373     * <ul>
6374     *   <li>
6375     *    for an eye-to-hand configuration
6376     * \(
6377     *     \begin{align*}
6378     *     ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
6379     *     \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
6380     *   </li>
6381     * </ul>
6382     *
6383     *     (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &amp;=
6384     *     \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
6385     *
6386     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i \\
6387     *     \end{align*}
6388     * \)
6389     *
6390     * \note
6391     * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
6392     * \note
6393     * A minimum of 2 motions with non parallel rotation axes are necessary to determine the hand-eye transformation.
6394     * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
6395     */
6396    public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper, int method) {
6397        Mat R_gripper2base_mat = Converters.vector_Mat_to_Mat(R_gripper2base);
6398        Mat t_gripper2base_mat = Converters.vector_Mat_to_Mat(t_gripper2base);
6399        Mat R_target2cam_mat = Converters.vector_Mat_to_Mat(R_target2cam);
6400        Mat t_target2cam_mat = Converters.vector_Mat_to_Mat(t_target2cam);
6401        calibrateHandEye_0(R_gripper2base_mat.nativeObj, t_gripper2base_mat.nativeObj, R_target2cam_mat.nativeObj, t_target2cam_mat.nativeObj, R_cam2gripper.nativeObj, t_cam2gripper.nativeObj, method);
6402    }
6403
6404    /**
6405     * Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
6406     *
6407     * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
6408     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
6409     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6410     * for all the transformations from gripper frame to robot base frame.
6411     * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
6412     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
6413     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6414     * from gripper frame to robot base frame.
6415     * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
6416     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
6417     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6418     * for all the transformations from calibration target frame to camera frame.
6419     * @param t_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
6420     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
6421     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6422     * from calibration target frame to camera frame.
6423     * @param R_cam2gripper Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
6424     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
6425     * @param t_cam2gripper Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
6426     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
6427     *
6428     * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
6429     * rotation then the translation (separable solutions) and the following methods are implemented:
6430     * <ul>
6431     *   <li>
6432     *    R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/EyeCalibration \cite Tsai89
6433     *   </li>
6434     *   <li>
6435     *    F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
6436     *   </li>
6437     *   <li>
6438     *    R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
6439     *   </li>
6440     * </ul>
6441     *
6442     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
6443     * with the following implemented methods:
6444     * <ul>
6445     *   <li>
6446     *    N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
6447     *   </li>
6448     *   <li>
6449     *    K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
6450     *   </li>
6451     * </ul>
6452     *
6453     * The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
6454     * mounted on a robot gripper ("hand") has to be estimated. This configuration is called eye-in-hand.
6455     *
6456     * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
6457     * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
6458     * the suitable transformations to the function, see below.
6459     *
6460     * ![](pics/hand-eye_figure.png)
6461     *
6462     * The calibration procedure is the following:
6463     * <ul>
6464     *   <li>
6465     *    a static calibration pattern is used to estimate the transformation between the target frame
6466     *   and the camera frame
6467     *   </li>
6468     *   <li>
6469     *    the robot gripper is moved in order to acquire several poses
6470     *   </li>
6471     *   <li>
6472     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
6473     *   instance the robot kinematics
6474     * \(
6475     *     \begin{bmatrix}
6476     *     X_b\\
6477     *     Y_b\\
6478     *     Z_b\\
6479     *     1
6480     *     \end{bmatrix}
6481     *     =
6482     *     \begin{bmatrix}
6483     *     _{}^{b}\textrm{R}_g &amp; _{}^{b}\textrm{t}_g \\
6484     *     0_{1 \times 3} &amp; 1
6485     *     \end{bmatrix}
6486     *     \begin{bmatrix}
6487     *     X_g\\
6488     *     Y_g\\
6489     *     Z_g\\
6490     *     1
6491     *     \end{bmatrix}
6492     * \)
6493     *   </li>
6494     *   <li>
6495     *    for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
6496     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
6497     * \(
6498     *     \begin{bmatrix}
6499     *     X_c\\
6500     *     Y_c\\
6501     *     Z_c\\
6502     *     1
6503     *     \end{bmatrix}
6504     *     =
6505     *     \begin{bmatrix}
6506     *     _{}^{c}\textrm{R}_t &amp; _{}^{c}\textrm{t}_t \\
6507     *     0_{1 \times 3} &amp; 1
6508     *     \end{bmatrix}
6509     *     \begin{bmatrix}
6510     *     X_t\\
6511     *     Y_t\\
6512     *     Z_t\\
6513     *     1
6514     *     \end{bmatrix}
6515     * \)
6516     *   </li>
6517     * </ul>
6518     *
6519     * The Hand-Eye calibration procedure returns the following homogeneous transformation
6520     * \(
6521     *     \begin{bmatrix}
6522     *     X_g\\
6523     *     Y_g\\
6524     *     Z_g\\
6525     *     1
6526     *     \end{bmatrix}
6527     *     =
6528     *     \begin{bmatrix}
6529     *     _{}^{g}\textrm{R}_c &amp; _{}^{g}\textrm{t}_c \\
6530     *     0_{1 \times 3} &amp; 1
6531     *     \end{bmatrix}
6532     *     \begin{bmatrix}
6533     *     X_c\\
6534     *     Y_c\\
6535     *     Z_c\\
6536     *     1
6537     *     \end{bmatrix}
6538     * \)
6539     *
6540     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
6541     * <ul>
6542     *   <li>
6543     *    for an eye-in-hand configuration
6544     * \(
6545     *     \begin{align*}
6546     *     ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
6547     *     \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
6548     *   </li>
6549     * </ul>
6550     *
6551     *     (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &amp;=
6552     *     \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
6553     *
6554     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i \\
6555     *     \end{align*}
6556     * \)
6557     *
6558     * <ul>
6559     *   <li>
6560     *    for an eye-to-hand configuration
6561     * \(
6562     *     \begin{align*}
6563     *     ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &amp;=
6564     *     \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
6565     *   </li>
6566     * </ul>
6567     *
6568     *     (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &amp;=
6569     *     \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
6570     *
6571     *     \textrm{A}_i \textrm{X} &amp;= \textrm{X} \textrm{B}_i \\
6572     *     \end{align*}
6573     * \)
6574     *
6575     * \note
6576     * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
6577     * \note
6578     * A minimum of 2 motions with non parallel rotation axes are necessary to determine the hand-eye transformation.
6579     * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
6580     */
6581    public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper) {
6582        Mat R_gripper2base_mat = Converters.vector_Mat_to_Mat(R_gripper2base);
6583        Mat t_gripper2base_mat = Converters.vector_Mat_to_Mat(t_gripper2base);
6584        Mat R_target2cam_mat = Converters.vector_Mat_to_Mat(R_target2cam);
6585        Mat t_target2cam_mat = Converters.vector_Mat_to_Mat(t_target2cam);
6586        calibrateHandEye_1(R_gripper2base_mat.nativeObj, t_gripper2base_mat.nativeObj, R_target2cam_mat.nativeObj, t_target2cam_mat.nativeObj, R_cam2gripper.nativeObj, t_cam2gripper.nativeObj);
6587    }
6588
6589
6590    //
6591    // C++:  void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
6592    //
6593
6594    /**
6595     * Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)
6596     *
6597     * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
6598     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
6599     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6600     * for all the transformations from world frame to the camera frame.
6601     * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
6602     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
6603     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6604     * from world frame to the camera frame.
6605     * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
6606     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
6607     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6608     * for all the transformations from robot base frame to the gripper frame.
6609     * @param t_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
6610     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
6611     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6612     * from robot base frame to the gripper frame.
6613     * @param R_base2world Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
6614     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
6615     * @param t_base2world Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
6616     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
6617     * @param R_gripper2cam Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
6618     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
6619     * @param t_gripper2cam Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
6620     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
6621     * @param method One of the implemented Robot-World/Hand-Eye calibration method, see cv::RobotWorldHandEyeCalibrationMethod
6622     *
6623     * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
6624     * rotation then the translation (separable solutions):
6625     * <ul>
6626     *   <li>
6627     *    M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
6628     *   </li>
6629     * </ul>
6630     *
6631     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
6632     * with the following implemented method:
6633     * <ul>
6634     *   <li>
6635     *    A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
6636     *   </li>
6637     * </ul>
6638     *
6639     * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
6640     * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
6641     *
6642     * ![](pics/robot-world_hand-eye_figure.png)
6643     *
6644     * The calibration procedure is the following:
6645     * <ul>
6646     *   <li>
6647     *    a static calibration pattern is used to estimate the transformation between the target frame
6648     *   and the camera frame
6649     *   </li>
6650     *   <li>
6651     *    the robot gripper is moved in order to acquire several poses
6652     *   </li>
6653     *   <li>
6654     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
6655     *   instance the robot kinematics
6656     * \(
6657     *     \begin{bmatrix}
6658     *     X_g\\
6659     *     Y_g\\
6660     *     Z_g\\
6661     *     1
6662     *     \end{bmatrix}
6663     *     =
6664     *     \begin{bmatrix}
6665     *     _{}^{g}\textrm{R}_b &amp; _{}^{g}\textrm{t}_b \\
6666     *     0_{1 \times 3} &amp; 1
6667     *     \end{bmatrix}
6668     *     \begin{bmatrix}
6669     *     X_b\\
6670     *     Y_b\\
6671     *     Z_b\\
6672     *     1
6673     *     \end{bmatrix}
6674     * \)
6675     *   </li>
6676     *   <li>
6677     *    for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
6678     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
6679     * \(
6680     *     \begin{bmatrix}
6681     *     X_c\\
6682     *     Y_c\\
6683     *     Z_c\\
6684     *     1
6685     *     \end{bmatrix}
6686     *     =
6687     *     \begin{bmatrix}
6688     *     _{}^{c}\textrm{R}_w &amp; _{}^{c}\textrm{t}_w \\
6689     *     0_{1 \times 3} &amp; 1
6690     *     \end{bmatrix}
6691     *     \begin{bmatrix}
6692     *     X_w\\
6693     *     Y_w\\
6694     *     Z_w\\
6695     *     1
6696     *     \end{bmatrix}
6697     * \)
6698     *   </li>
6699     * </ul>
6700     *
6701     * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
6702     * \(
6703     *     \begin{bmatrix}
6704     *     X_w\\
6705     *     Y_w\\
6706     *     Z_w\\
6707     *     1
6708     *     \end{bmatrix}
6709     *     =
6710     *     \begin{bmatrix}
6711     *     _{}^{w}\textrm{R}_b &amp; _{}^{w}\textrm{t}_b \\
6712     *     0_{1 \times 3} &amp; 1
6713     *     \end{bmatrix}
6714     *     \begin{bmatrix}
6715     *     X_b\\
6716     *     Y_b\\
6717     *     Z_b\\
6718     *     1
6719     *     \end{bmatrix}
6720     * \)
6721     * \(
6722     *     \begin{bmatrix}
6723     *     X_c\\
6724     *     Y_c\\
6725     *     Z_c\\
6726     *     1
6727     *     \end{bmatrix}
6728     *     =
6729     *     \begin{bmatrix}
6730     *     _{}^{c}\textrm{R}_g &amp; _{}^{c}\textrm{t}_g \\
6731     *     0_{1 \times 3} &amp; 1
6732     *     \end{bmatrix}
6733     *     \begin{bmatrix}
6734     *     X_g\\
6735     *     Y_g\\
6736     *     Z_g\\
6737     *     1
6738     *     \end{bmatrix}
6739     * \)
6740     *
6741     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
6742     * <ul>
6743     *   <li>
6744     *    \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
6745     *   </li>
6746     *   <li>
6747     *    \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
6748     *   </li>
6749     *   <li>
6750     *    \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
6751     *   </li>
6752     *   <li>
6753     *    \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
6754     *   </li>
6755     * </ul>
6756     *
6757     * \note
6758     * At least 3 measurements are required (input vectors size must be greater or equal to 3).
6759     */
6760    public static void calibrateRobotWorldHandEye(List<Mat> R_world2cam, List<Mat> t_world2cam, List<Mat> R_base2gripper, List<Mat> t_base2gripper, Mat R_base2world, Mat t_base2world, Mat R_gripper2cam, Mat t_gripper2cam, int method) {
6761        Mat R_world2cam_mat = Converters.vector_Mat_to_Mat(R_world2cam);
6762        Mat t_world2cam_mat = Converters.vector_Mat_to_Mat(t_world2cam);
6763        Mat R_base2gripper_mat = Converters.vector_Mat_to_Mat(R_base2gripper);
6764        Mat t_base2gripper_mat = Converters.vector_Mat_to_Mat(t_base2gripper);
6765        calibrateRobotWorldHandEye_0(R_world2cam_mat.nativeObj, t_world2cam_mat.nativeObj, R_base2gripper_mat.nativeObj, t_base2gripper_mat.nativeObj, R_base2world.nativeObj, t_base2world.nativeObj, R_gripper2cam.nativeObj, t_gripper2cam.nativeObj, method);
6766    }
6767
6768    /**
6769     * Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)
6770     *
6771     * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
6772     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
6773     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6774     * for all the transformations from world frame to the camera frame.
6775     * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
6776     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
6777     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6778     * from world frame to the camera frame.
6779     * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
6780     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
6781     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the rotation, {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
6782     * for all the transformations from robot base frame to the gripper frame.
6783     * @param t_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
6784     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
6785     * This is a vector ({@code vector&lt;Mat&gt;}) that contains the {@code (3x1)} translation vectors for all the transformations
6786     * from robot base frame to the gripper frame.
6787     * @param R_base2world Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
6788     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
6789     * @param t_base2world Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
6790     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
6791     * @param R_gripper2cam Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
6792     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
6793     * @param t_gripper2cam Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
6794     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
6795     *
6796     * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
6797     * rotation then the translation (separable solutions):
6798     * <ul>
6799     *   <li>
6800     *    M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
6801     *   </li>
6802     * </ul>
6803     *
6804     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
6805     * with the following implemented method:
6806     * <ul>
6807     *   <li>
6808     *    A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
6809     *   </li>
6810     * </ul>
6811     *
6812     * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
6813     * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
6814     *
6815     * ![](pics/robot-world_hand-eye_figure.png)
6816     *
6817     * The calibration procedure is the following:
6818     * <ul>
6819     *   <li>
6820     *    a static calibration pattern is used to estimate the transformation between the target frame
6821     *   and the camera frame
6822     *   </li>
6823     *   <li>
6824     *    the robot gripper is moved in order to acquire several poses
6825     *   </li>
6826     *   <li>
6827     *    for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
6828     *   instance the robot kinematics
6829     * \(
6830     *     \begin{bmatrix}
6831     *     X_g\\
6832     *     Y_g\\
6833     *     Z_g\\
6834     *     1
6835     *     \end{bmatrix}
6836     *     =
6837     *     \begin{bmatrix}
6838     *     _{}^{g}\textrm{R}_b &amp; _{}^{g}\textrm{t}_b \\
6839     *     0_{1 \times 3} &amp; 1
6840     *     \end{bmatrix}
6841     *     \begin{bmatrix}
6842     *     X_b\\
6843     *     Y_b\\
6844     *     Z_b\\
6845     *     1
6846     *     \end{bmatrix}
6847     * \)
6848     *   </li>
6849     *   <li>
6850     *    for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
6851     *   for instance a pose estimation method (PnP) from 2D-3D point correspondences
6852     * \(
6853     *     \begin{bmatrix}
6854     *     X_c\\
6855     *     Y_c\\
6856     *     Z_c\\
6857     *     1
6858     *     \end{bmatrix}
6859     *     =
6860     *     \begin{bmatrix}
6861     *     _{}^{c}\textrm{R}_w &amp; _{}^{c}\textrm{t}_w \\
6862     *     0_{1 \times 3} &amp; 1
6863     *     \end{bmatrix}
6864     *     \begin{bmatrix}
6865     *     X_w\\
6866     *     Y_w\\
6867     *     Z_w\\
6868     *     1
6869     *     \end{bmatrix}
6870     * \)
6871     *   </li>
6872     * </ul>
6873     *
6874     * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
6875     * \(
6876     *     \begin{bmatrix}
6877     *     X_w\\
6878     *     Y_w\\
6879     *     Z_w\\
6880     *     1
6881     *     \end{bmatrix}
6882     *     =
6883     *     \begin{bmatrix}
6884     *     _{}^{w}\textrm{R}_b &amp; _{}^{w}\textrm{t}_b \\
6885     *     0_{1 \times 3} &amp; 1
6886     *     \end{bmatrix}
6887     *     \begin{bmatrix}
6888     *     X_b\\
6889     *     Y_b\\
6890     *     Z_b\\
6891     *     1
6892     *     \end{bmatrix}
6893     * \)
6894     * \(
6895     *     \begin{bmatrix}
6896     *     X_c\\
6897     *     Y_c\\
6898     *     Z_c\\
6899     *     1
6900     *     \end{bmatrix}
6901     *     =
6902     *     \begin{bmatrix}
6903     *     _{}^{c}\textrm{R}_g &amp; _{}^{c}\textrm{t}_g \\
6904     *     0_{1 \times 3} &amp; 1
6905     *     \end{bmatrix}
6906     *     \begin{bmatrix}
6907     *     X_g\\
6908     *     Y_g\\
6909     *     Z_g\\
6910     *     1
6911     *     \end{bmatrix}
6912     * \)
6913     *
6914     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
6915     * <ul>
6916     *   <li>
6917     *    \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
6918     *   </li>
6919     *   <li>
6920     *    \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
6921     *   </li>
6922     *   <li>
6923     *    \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
6924     *   </li>
6925     *   <li>
6926     *    \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
6927     *   </li>
6928     * </ul>
6929     *
6930     * \note
6931     * At least 3 measurements are required (input vectors size must be greater or equal to 3).
6932     */
6933    public static void calibrateRobotWorldHandEye(List<Mat> R_world2cam, List<Mat> t_world2cam, List<Mat> R_base2gripper, List<Mat> t_base2gripper, Mat R_base2world, Mat t_base2world, Mat R_gripper2cam, Mat t_gripper2cam) {
6934        Mat R_world2cam_mat = Converters.vector_Mat_to_Mat(R_world2cam);
6935        Mat t_world2cam_mat = Converters.vector_Mat_to_Mat(t_world2cam);
6936        Mat R_base2gripper_mat = Converters.vector_Mat_to_Mat(R_base2gripper);
6937        Mat t_base2gripper_mat = Converters.vector_Mat_to_Mat(t_base2gripper);
6938        calibrateRobotWorldHandEye_1(R_world2cam_mat.nativeObj, t_world2cam_mat.nativeObj, R_base2gripper_mat.nativeObj, t_base2gripper_mat.nativeObj, R_base2world.nativeObj, t_base2world.nativeObj, R_gripper2cam.nativeObj, t_gripper2cam.nativeObj);
6939    }
6940
6941
6942    //
6943    // C++:  void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
6944    //
6945
6946    /**
6947     * Converts points from Euclidean to homogeneous space.
6948     *
6949     * @param src Input vector of N-dimensional points.
6950     * @param dst Output vector of N+1-dimensional points.
6951     *
6952     * The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
6953     * point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
6954     */
6955    public static void convertPointsToHomogeneous(Mat src, Mat dst) {
6956        convertPointsToHomogeneous_0(src.nativeObj, dst.nativeObj);
6957    }
6958
6959
6960    //
6961    // C++:  void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
6962    //
6963
6964    /**
6965     * Converts points from homogeneous to Euclidean space.
6966     *
6967     * @param src Input vector of N-dimensional points.
6968     * @param dst Output vector of N-1-dimensional points.
6969     *
6970     * The function converts points homogeneous to Euclidean space using perspective projection. That is,
6971     * each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
6972     * output point coordinates will be (0,0,0,...).
6973     */
6974    public static void convertPointsFromHomogeneous(Mat src, Mat dst) {
6975        convertPointsFromHomogeneous_0(src.nativeObj, dst.nativeObj);
6976    }
6977
6978
6979    //
6980    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
6981    //
6982
6983    /**
6984     * Calculates a fundamental matrix from the corresponding points in two images.
6985     *
6986     * @param points1 Array of N points from the first image. The point coordinates should be
6987     * floating-point (single or double precision).
6988     * @param points2 Array of the second image points of the same size and format as points1 .
6989     * @param method Method for computing a fundamental matrix.
6990     * <ul>
6991     *   <li>
6992     *    REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
6993     *   </li>
6994     *   <li>
6995     *    REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
6996     *   </li>
6997     *   <li>
6998     *    REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
6999     *   </li>
7000     *   <li>
7001     *    REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
7002     *   </li>
7003     * </ul>
7004     * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
7005     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7006     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7007     * point localization, image resolution, and the image noise.
7008     * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
7009     * of confidence (probability) that the estimated matrix is correct.
7010     * @param mask optional output mask
7011     * @param maxIters The maximum number of robust method iterations.
7012     *
7013     * The epipolar geometry is described by the following equation:
7014     *
7015     * \([p_2; 1]^T F [p_1; 1] = 0\)
7016     *
7017     * where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7018     * second images, respectively.
7019     *
7020     * The function calculates the fundamental matrix using one of four methods listed above and returns
7021     * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
7022     * algorithm, the function may return up to 3 solutions ( \(9 \times 3\) matrix that stores all 3
7023     * matrices sequentially).
7024     *
7025     * The calculated fundamental matrix may be passed further to #computeCorrespondEpilines that finds the
7026     * epipolar lines corresponding to the specified points. It can also be passed to
7027     * #stereoRectifyUncalibrated to compute the rectification transformation. :
7028     * <code>
7029     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
7030     *     int point_count = 100;
7031     *     vector&lt;Point2f&gt; points1(point_count);
7032     *     vector&lt;Point2f&gt; points2(point_count);
7033     *
7034     *     // initialize the points here ...
7035     *     for( int i = 0; i &lt; point_count; i++ )
7036     *     {
7037     *         points1[i] = ...;
7038     *         points2[i] = ...;
7039     *     }
7040     *
7041     *     Mat fundamental_matrix =
7042     *      findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
7043     * </code>
7044     * @return automatically generated
7045     */
7046    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat mask) {
7047        Mat points1_mat = points1;
7048        Mat points2_mat = points2;
7049        return new Mat(findFundamentalMat_0(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, maxIters, mask.nativeObj));
7050    }
7051
7052    /**
7053     * Calculates a fundamental matrix from the corresponding points in two images.
7054     *
7055     * @param points1 Array of N points from the first image. The point coordinates should be
7056     * floating-point (single or double precision).
7057     * @param points2 Array of the second image points of the same size and format as points1 .
7058     * @param method Method for computing a fundamental matrix.
7059     * <ul>
7060     *   <li>
7061     *    REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
7062     *   </li>
7063     *   <li>
7064     *    REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
7065     *   </li>
7066     *   <li>
7067     *    REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
7068     *   </li>
7069     *   <li>
7070     *    REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
7071     *   </li>
7072     * </ul>
7073     * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
7074     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7075     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7076     * point localization, image resolution, and the image noise.
7077     * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
7078     * of confidence (probability) that the estimated matrix is correct.
7079     * @param maxIters The maximum number of robust method iterations.
7080     *
7081     * The epipolar geometry is described by the following equation:
7082     *
7083     * \([p_2; 1]^T F [p_1; 1] = 0\)
7084     *
7085     * where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7086     * second images, respectively.
7087     *
7088     * The function calculates the fundamental matrix using one of four methods listed above and returns
7089     * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
7090     * algorithm, the function may return up to 3 solutions ( \(9 \times 3\) matrix that stores all 3
7091     * matrices sequentially).
7092     *
7093     * The calculated fundamental matrix may be passed further to #computeCorrespondEpilines that finds the
7094     * epipolar lines corresponding to the specified points. It can also be passed to
7095     * #stereoRectifyUncalibrated to compute the rectification transformation. :
7096     * <code>
7097     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
7098     *     int point_count = 100;
7099     *     vector&lt;Point2f&gt; points1(point_count);
7100     *     vector&lt;Point2f&gt; points2(point_count);
7101     *
7102     *     // initialize the points here ...
7103     *     for( int i = 0; i &lt; point_count; i++ )
7104     *     {
7105     *         points1[i] = ...;
7106     *         points2[i] = ...;
7107     *     }
7108     *
7109     *     Mat fundamental_matrix =
7110     *      findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
7111     * </code>
7112     * @return automatically generated
7113     */
7114    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters) {
7115        Mat points1_mat = points1;
7116        Mat points2_mat = points2;
7117        return new Mat(findFundamentalMat_1(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, maxIters));
7118    }
7119
7120
7121    //
7122    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
7123    //
7124
7125    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, Mat mask) {
7126        Mat points1_mat = points1;
7127        Mat points2_mat = points2;
7128        return new Mat(findFundamentalMat_2(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, mask.nativeObj));
7129    }
7130
7131    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence) {
7132        Mat points1_mat = points1;
7133        Mat points2_mat = points2;
7134        return new Mat(findFundamentalMat_3(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence));
7135    }
7136
7137    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold) {
7138        Mat points1_mat = points1;
7139        Mat points2_mat = points2;
7140        return new Mat(findFundamentalMat_4(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold));
7141    }
7142
7143    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method) {
7144        Mat points1_mat = points1;
7145        Mat points2_mat = points2;
7146        return new Mat(findFundamentalMat_5(points1_mat.nativeObj, points2_mat.nativeObj, method));
7147    }
7148
7149    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2) {
7150        Mat points1_mat = points1;
7151        Mat points2_mat = points2;
7152        return new Mat(findFundamentalMat_6(points1_mat.nativeObj, points2_mat.nativeObj));
7153    }
7154
7155
7156    //
7157    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, Mat& mask, UsacParams params)
7158    //
7159
7160    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, Mat mask, UsacParams params) {
7161        Mat points1_mat = points1;
7162        Mat points2_mat = points2;
7163        return new Mat(findFundamentalMat_7(points1_mat.nativeObj, points2_mat.nativeObj, mask.nativeObj, params.nativeObj));
7164    }
7165
7166
7167    //
7168    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
7169    //
7170
7171    /**
7172     * Calculates an essential matrix from the corresponding points in two images.
7173     *
7174     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7175     * be floating-point (single or double precision).
7176     * @param points2 Array of the second image points of the same size and format as points1 .
7177     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
7178     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7179     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
7180     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7181     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
7182     * passing these coordinates, pass the identity matrix for this parameter.
7183     * @param method Method for computing an essential matrix.
7184     * <ul>
7185     *   <li>
7186     *    REF: RANSAC for the RANSAC algorithm.
7187     *   </li>
7188     *   <li>
7189     *    REF: LMEDS for the LMedS algorithm.
7190     *   </li>
7191     * </ul>
7192     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7193     * confidence (probability) that the estimated matrix is correct.
7194     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7195     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7196     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7197     * point localization, image resolution, and the image noise.
7198     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
7199     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7200     * @param maxIters The maximum number of robust method iterations.
7201     *
7202     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7203     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7204     *
7205     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7206     *
7207     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7208     * second images, respectively. The result of this function may be passed further to
7209     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
7210     * @return automatically generated
7211     */
7212    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold, int maxIters, Mat mask) {
7213        return new Mat(findEssentialMat_0(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold, maxIters, mask.nativeObj));
7214    }
7215
7216    /**
7217     * Calculates an essential matrix from the corresponding points in two images.
7218     *
7219     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7220     * be floating-point (single or double precision).
7221     * @param points2 Array of the second image points of the same size and format as points1 .
7222     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
7223     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7224     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
7225     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7226     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
7227     * passing these coordinates, pass the identity matrix for this parameter.
7228     * @param method Method for computing an essential matrix.
7229     * <ul>
7230     *   <li>
7231     *    REF: RANSAC for the RANSAC algorithm.
7232     *   </li>
7233     *   <li>
7234     *    REF: LMEDS for the LMedS algorithm.
7235     *   </li>
7236     * </ul>
7237     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7238     * confidence (probability) that the estimated matrix is correct.
7239     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7240     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7241     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7242     * point localization, image resolution, and the image noise.
7243     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7244     * @param maxIters The maximum number of robust method iterations.
7245     *
7246     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7247     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7248     *
7249     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7250     *
7251     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7252     * second images, respectively. The result of this function may be passed further to
7253     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
7254     * @return automatically generated
7255     */
7256    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold, int maxIters) {
7257        return new Mat(findEssentialMat_1(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold, maxIters));
7258    }
7259
7260    /**
7261     * Calculates an essential matrix from the corresponding points in two images.
7262     *
7263     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7264     * be floating-point (single or double precision).
7265     * @param points2 Array of the second image points of the same size and format as points1 .
7266     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
7267     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7268     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
7269     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7270     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
7271     * passing these coordinates, pass the identity matrix for this parameter.
7272     * @param method Method for computing an essential matrix.
7273     * <ul>
7274     *   <li>
7275     *    REF: RANSAC for the RANSAC algorithm.
7276     *   </li>
7277     *   <li>
7278     *    REF: LMEDS for the LMedS algorithm.
7279     *   </li>
7280     * </ul>
7281     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7282     * confidence (probability) that the estimated matrix is correct.
7283     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7284     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7285     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7286     * point localization, image resolution, and the image noise.
7287     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7288     *
7289     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7290     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7291     *
7292     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7293     *
7294     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7295     * second images, respectively. The result of this function may be passed further to
7296     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
7297     * @return automatically generated
7298     */
7299    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold) {
7300        return new Mat(findEssentialMat_2(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold));
7301    }
7302
7303    /**
7304     * Calculates an essential matrix from the corresponding points in two images.
7305     *
7306     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7307     * be floating-point (single or double precision).
7308     * @param points2 Array of the second image points of the same size and format as points1 .
7309     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
7310     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7311     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
7312     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7313     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
7314     * passing these coordinates, pass the identity matrix for this parameter.
7315     * @param method Method for computing an essential matrix.
7316     * <ul>
7317     *   <li>
7318     *    REF: RANSAC for the RANSAC algorithm.
7319     *   </li>
7320     *   <li>
7321     *    REF: LMEDS for the LMedS algorithm.
7322     *   </li>
7323     * </ul>
7324     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7325     * confidence (probability) that the estimated matrix is correct.
7326     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7327     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7328     * point localization, image resolution, and the image noise.
7329     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7330     *
7331     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7332     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7333     *
7334     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7335     *
7336     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7337     * second images, respectively. The result of this function may be passed further to
7338     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
7339     * @return automatically generated
7340     */
7341    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob) {
7342        return new Mat(findEssentialMat_3(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob));
7343    }
7344
7345    /**
7346     * Calculates an essential matrix from the corresponding points in two images.
7347     *
7348     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7349     * be floating-point (single or double precision).
7350     * @param points2 Array of the second image points of the same size and format as points1 .
7351     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
7352     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7353     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
7354     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7355     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
7356     * passing these coordinates, pass the identity matrix for this parameter.
7357     * @param method Method for computing an essential matrix.
7358     * <ul>
7359     *   <li>
7360     *    REF: RANSAC for the RANSAC algorithm.
7361     *   </li>
7362     *   <li>
7363     *    REF: LMEDS for the LMedS algorithm.
7364     *   </li>
7365     * </ul>
7366     * confidence (probability) that the estimated matrix is correct.
7367     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7368     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7369     * point localization, image resolution, and the image noise.
7370     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7371     *
7372     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7373     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7374     *
7375     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7376     *
7377     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7378     * second images, respectively. The result of this function may be passed further to
7379     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
7380     * @return automatically generated
7381     */
7382    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method) {
7383        return new Mat(findEssentialMat_4(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method));
7384    }
7385
7386    /**
7387     * Calculates an essential matrix from the corresponding points in two images.
7388     *
7389     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7390     * be floating-point (single or double precision).
7391     * @param points2 Array of the second image points of the same size and format as points1 .
7392     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
7393     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7394     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
7395     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7396     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
7397     * passing these coordinates, pass the identity matrix for this parameter.
7398     * <ul>
7399     *   <li>
7400     *    REF: RANSAC for the RANSAC algorithm.
7401     *   </li>
7402     *   <li>
7403     *    REF: LMEDS for the LMedS algorithm.
7404     *   </li>
7405     * </ul>
7406     * confidence (probability) that the estimated matrix is correct.
7407     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7408     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7409     * point localization, image resolution, and the image noise.
7410     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7411     *
7412     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7413     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7414     *
7415     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7416     *
7417     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7418     * second images, respectively. The result of this function may be passed further to
7419     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
7420     * @return automatically generated
7421     */
7422    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix) {
7423        return new Mat(findEssentialMat_5(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj));
7424    }
7425
7426
7427    //
7428    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
7429    //
7430
7431    /**
7432     *
7433     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7434     * be floating-point (single or double precision).
7435     * @param points2 Array of the second image points of the same size and format as points1 .
7436     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7437     * are feature points from cameras with same focal length and principal point.
7438     * @param pp principal point of the camera.
7439     * @param method Method for computing a fundamental matrix.
7440     * <ul>
7441     *   <li>
7442     *    REF: RANSAC for the RANSAC algorithm.
7443     *   </li>
7444     *   <li>
7445     *    REF: LMEDS for the LMedS algorithm.
7446     *   </li>
7447     * </ul>
7448     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7449     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7450     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7451     * point localization, image resolution, and the image noise.
7452     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7453     * confidence (probability) that the estimated matrix is correct.
7454     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
7455     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7456     * @param maxIters The maximum number of robust method iterations.
7457     *
7458     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7459     * principal point:
7460     *
7461     * \(A =
7462     * \begin{bmatrix}
7463     * f &amp; 0 &amp; x_{pp}  \\
7464     * 0 &amp; f &amp; y_{pp}  \\
7465     * 0 &amp; 0 &amp; 1
7466     * \end{bmatrix}\)
7467     * @return automatically generated
7468     */
7469    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold, int maxIters, Mat mask) {
7470        return new Mat(findEssentialMat_6(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold, maxIters, mask.nativeObj));
7471    }
7472
7473    /**
7474     *
7475     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7476     * be floating-point (single or double precision).
7477     * @param points2 Array of the second image points of the same size and format as points1 .
7478     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7479     * are feature points from cameras with same focal length and principal point.
7480     * @param pp principal point of the camera.
7481     * @param method Method for computing a fundamental matrix.
7482     * <ul>
7483     *   <li>
7484     *    REF: RANSAC for the RANSAC algorithm.
7485     *   </li>
7486     *   <li>
7487     *    REF: LMEDS for the LMedS algorithm.
7488     *   </li>
7489     * </ul>
7490     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7491     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7492     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7493     * point localization, image resolution, and the image noise.
7494     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7495     * confidence (probability) that the estimated matrix is correct.
7496     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7497     * @param maxIters The maximum number of robust method iterations.
7498     *
7499     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7500     * principal point:
7501     *
7502     * \(A =
7503     * \begin{bmatrix}
7504     * f &amp; 0 &amp; x_{pp}  \\
7505     * 0 &amp; f &amp; y_{pp}  \\
7506     * 0 &amp; 0 &amp; 1
7507     * \end{bmatrix}\)
7508     * @return automatically generated
7509     */
7510    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold, int maxIters) {
7511        return new Mat(findEssentialMat_7(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold, maxIters));
7512    }
7513
7514    /**
7515     *
7516     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7517     * be floating-point (single or double precision).
7518     * @param points2 Array of the second image points of the same size and format as points1 .
7519     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7520     * are feature points from cameras with same focal length and principal point.
7521     * @param pp principal point of the camera.
7522     * @param method Method for computing a fundamental matrix.
7523     * <ul>
7524     *   <li>
7525     *    REF: RANSAC for the RANSAC algorithm.
7526     *   </li>
7527     *   <li>
7528     *    REF: LMEDS for the LMedS algorithm.
7529     *   </li>
7530     * </ul>
7531     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7532     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7533     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7534     * point localization, image resolution, and the image noise.
7535     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7536     * confidence (probability) that the estimated matrix is correct.
7537     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7538     *
7539     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7540     * principal point:
7541     *
7542     * \(A =
7543     * \begin{bmatrix}
7544     * f &amp; 0 &amp; x_{pp}  \\
7545     * 0 &amp; f &amp; y_{pp}  \\
7546     * 0 &amp; 0 &amp; 1
7547     * \end{bmatrix}\)
7548     * @return automatically generated
7549     */
7550    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold) {
7551        return new Mat(findEssentialMat_8(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold));
7552    }
7553
7554    /**
7555     *
7556     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7557     * be floating-point (single or double precision).
7558     * @param points2 Array of the second image points of the same size and format as points1 .
7559     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7560     * are feature points from cameras with same focal length and principal point.
7561     * @param pp principal point of the camera.
7562     * @param method Method for computing a fundamental matrix.
7563     * <ul>
7564     *   <li>
7565     *    REF: RANSAC for the RANSAC algorithm.
7566     *   </li>
7567     *   <li>
7568     *    REF: LMEDS for the LMedS algorithm.
7569     *   </li>
7570     * </ul>
7571     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7572     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7573     * point localization, image resolution, and the image noise.
7574     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7575     * confidence (probability) that the estimated matrix is correct.
7576     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7577     *
7578     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7579     * principal point:
7580     *
7581     * \(A =
7582     * \begin{bmatrix}
7583     * f &amp; 0 &amp; x_{pp}  \\
7584     * 0 &amp; f &amp; y_{pp}  \\
7585     * 0 &amp; 0 &amp; 1
7586     * \end{bmatrix}\)
7587     * @return automatically generated
7588     */
7589    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob) {
7590        return new Mat(findEssentialMat_9(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob));
7591    }
7592
7593    /**
7594     *
7595     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7596     * be floating-point (single or double precision).
7597     * @param points2 Array of the second image points of the same size and format as points1 .
7598     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7599     * are feature points from cameras with same focal length and principal point.
7600     * @param pp principal point of the camera.
7601     * @param method Method for computing a fundamental matrix.
7602     * <ul>
7603     *   <li>
7604     *    REF: RANSAC for the RANSAC algorithm.
7605     *   </li>
7606     *   <li>
7607     *    REF: LMEDS for the LMedS algorithm.
7608     *   </li>
7609     * </ul>
7610     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7611     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7612     * point localization, image resolution, and the image noise.
7613     * confidence (probability) that the estimated matrix is correct.
7614     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7615     *
7616     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7617     * principal point:
7618     *
7619     * \(A =
7620     * \begin{bmatrix}
7621     * f &amp; 0 &amp; x_{pp}  \\
7622     * 0 &amp; f &amp; y_{pp}  \\
7623     * 0 &amp; 0 &amp; 1
7624     * \end{bmatrix}\)
7625     * @return automatically generated
7626     */
7627    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method) {
7628        return new Mat(findEssentialMat_10(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method));
7629    }
7630
7631    /**
7632     *
7633     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7634     * be floating-point (single or double precision).
7635     * @param points2 Array of the second image points of the same size and format as points1 .
7636     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7637     * are feature points from cameras with same focal length and principal point.
7638     * @param pp principal point of the camera.
7639     * <ul>
7640     *   <li>
7641     *    REF: RANSAC for the RANSAC algorithm.
7642     *   </li>
7643     *   <li>
7644     *    REF: LMEDS for the LMedS algorithm.
7645     *   </li>
7646     * </ul>
7647     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7648     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7649     * point localization, image resolution, and the image noise.
7650     * confidence (probability) that the estimated matrix is correct.
7651     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7652     *
7653     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7654     * principal point:
7655     *
7656     * \(A =
7657     * \begin{bmatrix}
7658     * f &amp; 0 &amp; x_{pp}  \\
7659     * 0 &amp; f &amp; y_{pp}  \\
7660     * 0 &amp; 0 &amp; 1
7661     * \end{bmatrix}\)
7662     * @return automatically generated
7663     */
7664    public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp) {
7665        return new Mat(findEssentialMat_11(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y));
7666    }
7667
7668    /**
7669     *
7670     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7671     * be floating-point (single or double precision).
7672     * @param points2 Array of the second image points of the same size and format as points1 .
7673     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
7674     * are feature points from cameras with same focal length and principal point.
7675     * <ul>
7676     *   <li>
7677     *    REF: RANSAC for the RANSAC algorithm.
7678     *   </li>
7679     *   <li>
7680     *    REF: LMEDS for the LMedS algorithm.
7681     *   </li>
7682     * </ul>
7683     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7684     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7685     * point localization, image resolution, and the image noise.
7686     * confidence (probability) that the estimated matrix is correct.
7687     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7688     *
7689     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7690     * principal point:
7691     *
7692     * \(A =
7693     * \begin{bmatrix}
7694     * f &amp; 0 &amp; x_{pp}  \\
7695     * 0 &amp; f &amp; y_{pp}  \\
7696     * 0 &amp; 0 &amp; 1
7697     * \end{bmatrix}\)
7698     * @return automatically generated
7699     */
7700    public static Mat findEssentialMat(Mat points1, Mat points2, double focal) {
7701        return new Mat(findEssentialMat_12(points1.nativeObj, points2.nativeObj, focal));
7702    }
7703
7704    /**
7705     *
7706     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7707     * be floating-point (single or double precision).
7708     * @param points2 Array of the second image points of the same size and format as points1 .
7709     * are feature points from cameras with same focal length and principal point.
7710     * <ul>
7711     *   <li>
7712     *    REF: RANSAC for the RANSAC algorithm.
7713     *   </li>
7714     *   <li>
7715     *    REF: LMEDS for the LMedS algorithm.
7716     *   </li>
7717     * </ul>
7718     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7719     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7720     * point localization, image resolution, and the image noise.
7721     * confidence (probability) that the estimated matrix is correct.
7722     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7723     *
7724     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
7725     * principal point:
7726     *
7727     * \(A =
7728     * \begin{bmatrix}
7729     * f &amp; 0 &amp; x_{pp}  \\
7730     * 0 &amp; f &amp; y_{pp}  \\
7731     * 0 &amp; 0 &amp; 1
7732     * \end{bmatrix}\)
7733     * @return automatically generated
7734     */
7735    public static Mat findEssentialMat(Mat points1, Mat points2) {
7736        return new Mat(findEssentialMat_13(points1.nativeObj, points2.nativeObj));
7737    }
7738
7739
7740    //
7741    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
7742    //
7743
7744    /**
7745     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
7746     *
7747     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7748     * be floating-point (single or double precision).
7749     * @param points2 Array of the second image points of the same size and format as points1 .
7750     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7751     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7752     * same camera matrix. If this assumption does not hold for your use case, use
7753     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7754     * to normalized image coordinates, which are valid for the identity camera matrix. When
7755     * passing these coordinates, pass the identity matrix for this parameter.
7756     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7757     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7758     * same camera matrix. If this assumption does not hold for your use case, use
7759     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7760     * to normalized image coordinates, which are valid for the identity camera matrix. When
7761     * passing these coordinates, pass the identity matrix for this parameter.
7762     * @param distCoeffs1 Input vector of distortion coefficients
7763     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7764     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7765     * @param distCoeffs2 Input vector of distortion coefficients
7766     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7767     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7768     * @param method Method for computing an essential matrix.
7769     * <ul>
7770     *   <li>
7771     *    REF: RANSAC for the RANSAC algorithm.
7772     *   </li>
7773     *   <li>
7774     *    REF: LMEDS for the LMedS algorithm.
7775     *   </li>
7776     * </ul>
7777     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7778     * confidence (probability) that the estimated matrix is correct.
7779     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7780     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7781     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7782     * point localization, image resolution, and the image noise.
7783     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
7784     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7785     *
7786     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7787     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7788     *
7789     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7790     *
7791     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7792     * second images, respectively. The result of this function may be passed further to
7793     * #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
7794     * @return automatically generated
7795     */
7796    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob, double threshold, Mat mask) {
7797        return new Mat(findEssentialMat_14(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob, threshold, mask.nativeObj));
7798    }
7799
7800    /**
7801     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
7802     *
7803     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7804     * be floating-point (single or double precision).
7805     * @param points2 Array of the second image points of the same size and format as points1 .
7806     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7807     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7808     * same camera matrix. If this assumption does not hold for your use case, use
7809     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7810     * to normalized image coordinates, which are valid for the identity camera matrix. When
7811     * passing these coordinates, pass the identity matrix for this parameter.
7812     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7813     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7814     * same camera matrix. If this assumption does not hold for your use case, use
7815     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7816     * to normalized image coordinates, which are valid for the identity camera matrix. When
7817     * passing these coordinates, pass the identity matrix for this parameter.
7818     * @param distCoeffs1 Input vector of distortion coefficients
7819     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7820     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7821     * @param distCoeffs2 Input vector of distortion coefficients
7822     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7823     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7824     * @param method Method for computing an essential matrix.
7825     * <ul>
7826     *   <li>
7827     *    REF: RANSAC for the RANSAC algorithm.
7828     *   </li>
7829     *   <li>
7830     *    REF: LMEDS for the LMedS algorithm.
7831     *   </li>
7832     * </ul>
7833     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7834     * confidence (probability) that the estimated matrix is correct.
7835     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
7836     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7837     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7838     * point localization, image resolution, and the image noise.
7839     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7840     *
7841     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7842     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7843     *
7844     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7845     *
7846     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7847     * second images, respectively. The result of this function may be passed further to
7848     * #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
7849     * @return automatically generated
7850     */
7851    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob, double threshold) {
7852        return new Mat(findEssentialMat_15(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob, threshold));
7853    }
7854
7855    /**
7856     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
7857     *
7858     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7859     * be floating-point (single or double precision).
7860     * @param points2 Array of the second image points of the same size and format as points1 .
7861     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7862     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7863     * same camera matrix. If this assumption does not hold for your use case, use
7864     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7865     * to normalized image coordinates, which are valid for the identity camera matrix. When
7866     * passing these coordinates, pass the identity matrix for this parameter.
7867     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7868     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7869     * same camera matrix. If this assumption does not hold for your use case, use
7870     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7871     * to normalized image coordinates, which are valid for the identity camera matrix. When
7872     * passing these coordinates, pass the identity matrix for this parameter.
7873     * @param distCoeffs1 Input vector of distortion coefficients
7874     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7875     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7876     * @param distCoeffs2 Input vector of distortion coefficients
7877     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7878     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7879     * @param method Method for computing an essential matrix.
7880     * <ul>
7881     *   <li>
7882     *    REF: RANSAC for the RANSAC algorithm.
7883     *   </li>
7884     *   <li>
7885     *    REF: LMEDS for the LMedS algorithm.
7886     *   </li>
7887     * </ul>
7888     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
7889     * confidence (probability) that the estimated matrix is correct.
7890     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7891     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7892     * point localization, image resolution, and the image noise.
7893     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7894     *
7895     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7896     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7897     *
7898     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7899     *
7900     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7901     * second images, respectively. The result of this function may be passed further to
7902     * #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
7903     * @return automatically generated
7904     */
7905    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob) {
7906        return new Mat(findEssentialMat_16(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob));
7907    }
7908
7909    /**
7910     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
7911     *
7912     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7913     * be floating-point (single or double precision).
7914     * @param points2 Array of the second image points of the same size and format as points1 .
7915     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7916     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7917     * same camera matrix. If this assumption does not hold for your use case, use
7918     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7919     * to normalized image coordinates, which are valid for the identity camera matrix. When
7920     * passing these coordinates, pass the identity matrix for this parameter.
7921     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7922     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7923     * same camera matrix. If this assumption does not hold for your use case, use
7924     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7925     * to normalized image coordinates, which are valid for the identity camera matrix. When
7926     * passing these coordinates, pass the identity matrix for this parameter.
7927     * @param distCoeffs1 Input vector of distortion coefficients
7928     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7929     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7930     * @param distCoeffs2 Input vector of distortion coefficients
7931     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7932     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7933     * @param method Method for computing an essential matrix.
7934     * <ul>
7935     *   <li>
7936     *    REF: RANSAC for the RANSAC algorithm.
7937     *   </li>
7938     *   <li>
7939     *    REF: LMEDS for the LMedS algorithm.
7940     *   </li>
7941     * </ul>
7942     * confidence (probability) that the estimated matrix is correct.
7943     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7944     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7945     * point localization, image resolution, and the image noise.
7946     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7947     *
7948     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
7949     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
7950     *
7951     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
7952     *
7953     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
7954     * second images, respectively. The result of this function may be passed further to
7955     * #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
7956     * @return automatically generated
7957     */
7958    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method) {
7959        return new Mat(findEssentialMat_17(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method));
7960    }
7961
7962    /**
7963     * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
7964     *
7965     * @param points1 Array of N (N &gt;= 5) 2D points from the first image. The point coordinates should
7966     * be floating-point (single or double precision).
7967     * @param points2 Array of the second image points of the same size and format as points1 .
7968     * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7969     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7970     * same camera matrix. If this assumption does not hold for your use case, use
7971     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7972     * to normalized image coordinates, which are valid for the identity camera matrix. When
7973     * passing these coordinates, pass the identity matrix for this parameter.
7974     * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
7975     * Note that this function assumes that points1 and points2 are feature points from cameras with the
7976     * same camera matrix. If this assumption does not hold for your use case, use
7977     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
7978     * to normalized image coordinates, which are valid for the identity camera matrix. When
7979     * passing these coordinates, pass the identity matrix for this parameter.
7980     * @param distCoeffs1 Input vector of distortion coefficients
7981     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7982     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7983     * @param distCoeffs2 Input vector of distortion coefficients
7984     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
7985     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
7986     * <ul>
7987     *   <li>
7988     *    REF: RANSAC for the RANSAC algorithm.
7989     *   </li>
7990     *   <li>
7991     *    REF: LMEDS for the LMedS algorithm.
7992     *   </li>
7993     * </ul>
7994     * confidence (probability) that the estimated matrix is correct.
7995     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
7996     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
7997     * point localization, image resolution, and the image noise.
7998     * for the other points. The array is computed only in the RANSAC and LMedS methods.
7999     *
8000     * This function estimates essential matrix based on the five-point algorithm solver in CITE: Nister03 .
8001     * CITE: SteweniusCFS is also a related. The epipolar geometry is described by the following equation:
8002     *
8003     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
8004     *
8005     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
8006     * second images, respectively. The result of this function may be passed further to
8007     * #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
8008     * @return automatically generated
8009     */
8010    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2) {
8011        return new Mat(findEssentialMat_18(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj));
8012    }
8013
8014
8015    //
8016    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params)
8017    //
8018
8019    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat mask, UsacParams params) {
8020        return new Mat(findEssentialMat_19(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, cameraMatrix2.nativeObj, dist_coeff1.nativeObj, dist_coeff2.nativeObj, mask.nativeObj, params.nativeObj));
8021    }
8022
8023
8024    //
8025    // C++:  void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t)
8026    //
8027
8028    /**
8029     * Decompose an essential matrix to possible rotations and translation.
8030     *
8031     * @param E The input essential matrix.
8032     * @param R1 One possible rotation matrix.
8033     * @param R2 Another possible rotation matrix.
8034     * @param t One possible translation.
8035     *
8036     * This function decomposes the essential matrix E using svd decomposition CITE: HartleyZ00. In
8037     * general, four possible poses exist for the decomposition of E. They are \([R_1, t]\),
8038     * \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\).
8039     *
8040     * If E gives the epipolar constraint \([p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\) between the image
8041     * points \(p_1\) in the first image and \(p_2\) in second image, then any of the tuples
8042     * \([R_1, t]\), \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\) is a change of basis from the first
8043     * camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
8044     * can only get the direction of the translation. For this reason, the translation t is returned with
8045     * unit length.
8046     */
8047    public static void decomposeEssentialMat(Mat E, Mat R1, Mat R2, Mat t) {
8048        decomposeEssentialMat_0(E.nativeObj, R1.nativeObj, R2.nativeObj, t.nativeObj);
8049    }
8050
8051
8052    //
8053    // C++:  int cv::recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat& E, Mat& R, Mat& t, int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
8054    //
8055
8056    /**
8057     * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
8058     * inliers that pass the check.
8059     *
8060     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8061     * floating-point (single or double precision).
8062     * @param points2 Array of the second image points of the same size and format as points1 .
8063     * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
8064     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8065     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
8066     * REF: calibrateCamera.
8067     * @param cameraMatrix2 Input/output camera matrix for the first camera, the same as in
8068     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8069     * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
8070     * REF: calibrateCamera.
8071     * @param E The output essential matrix.
8072     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8073     * that performs a change of basis from the first camera's coordinate system to the second camera's
8074     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8075     * described below.
8076     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8077     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8078     * length.
8079     * @param method Method for computing an essential matrix.
8080     * <ul>
8081     *   <li>
8082     *    REF: RANSAC for the RANSAC algorithm.
8083     *   </li>
8084     *   <li>
8085     *    REF: LMEDS for the LMedS algorithm.
8086     *   </li>
8087     * </ul>
8088     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
8089     * confidence (probability) that the estimated matrix is correct.
8090     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
8091     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8092     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8093     * point localization, image resolution, and the image noise.
8094     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
8095     * inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to
8096     * recover pose. In the output mask only inliers which pass the cheirality check.
8097     *
8098     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8099     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
8100     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8101     *
8102     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8103     * scenario, points1 and points2 are the same input for findEssentialMat.:
8104     * <code>
8105     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8106     *     int point_count = 100;
8107     *     vector&lt;Point2f&gt; points1(point_count);
8108     *     vector&lt;Point2f&gt; points2(point_count);
8109     *
8110     *     // initialize the points here ...
8111     *     for( int i = 0; i &lt; point_count; i++ )
8112     *     {
8113     *         points1[i] = ...;
8114     *         points2[i] = ...;
8115     *     }
8116     *
8117     *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
8118     *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
8119     *
8120     *     // Output: Essential matrix, relative rotation and relative translation.
8121     *     Mat E, R, t, mask;
8122     *
8123     *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
8124     * </code>
8125     * @return automatically generated
8126     */
8127    public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method, double prob, double threshold, Mat mask) {
8128        return recoverPose_0(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method, prob, threshold, mask.nativeObj);
8129    }
8130
8131    /**
8132     * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
8133     * inliers that pass the check.
8134     *
8135     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8136     * floating-point (single or double precision).
8137     * @param points2 Array of the second image points of the same size and format as points1 .
8138     * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
8139     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8140     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
8141     * REF: calibrateCamera.
8142     * @param cameraMatrix2 Input/output camera matrix for the first camera, the same as in
8143     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8144     * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
8145     * REF: calibrateCamera.
8146     * @param E The output essential matrix.
8147     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8148     * that performs a change of basis from the first camera's coordinate system to the second camera's
8149     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8150     * described below.
8151     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8152     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8153     * length.
8154     * @param method Method for computing an essential matrix.
8155     * <ul>
8156     *   <li>
8157     *    REF: RANSAC for the RANSAC algorithm.
8158     *   </li>
8159     *   <li>
8160     *    REF: LMEDS for the LMedS algorithm.
8161     *   </li>
8162     * </ul>
8163     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
8164     * confidence (probability) that the estimated matrix is correct.
8165     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
8166     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8167     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8168     * point localization, image resolution, and the image noise.
8169     * inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to
8170     * recover pose. In the output mask only inliers which pass the cheirality check.
8171     *
8172     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8173     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
8174     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8175     *
8176     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8177     * scenario, points1 and points2 are the same input for findEssentialMat.:
8178     * <code>
8179     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8180     *     int point_count = 100;
8181     *     vector&lt;Point2f&gt; points1(point_count);
8182     *     vector&lt;Point2f&gt; points2(point_count);
8183     *
8184     *     // initialize the points here ...
8185     *     for( int i = 0; i &lt; point_count; i++ )
8186     *     {
8187     *         points1[i] = ...;
8188     *         points2[i] = ...;
8189     *     }
8190     *
8191     *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
8192     *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
8193     *
8194     *     // Output: Essential matrix, relative rotation and relative translation.
8195     *     Mat E, R, t, mask;
8196     *
8197     *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
8198     * </code>
8199     * @return automatically generated
8200     */
8201    public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method, double prob, double threshold) {
8202        return recoverPose_1(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method, prob, threshold);
8203    }
8204
8205    /**
8206     * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
8207     * inliers that pass the check.
8208     *
8209     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8210     * floating-point (single or double precision).
8211     * @param points2 Array of the second image points of the same size and format as points1 .
8212     * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
8213     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8214     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
8215     * REF: calibrateCamera.
8216     * @param cameraMatrix2 Input/output camera matrix for the first camera, the same as in
8217     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8218     * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
8219     * REF: calibrateCamera.
8220     * @param E The output essential matrix.
8221     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8222     * that performs a change of basis from the first camera's coordinate system to the second camera's
8223     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8224     * described below.
8225     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8226     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8227     * length.
8228     * @param method Method for computing an essential matrix.
8229     * <ul>
8230     *   <li>
8231     *    REF: RANSAC for the RANSAC algorithm.
8232     *   </li>
8233     *   <li>
8234     *    REF: LMEDS for the LMedS algorithm.
8235     *   </li>
8236     * </ul>
8237     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
8238     * confidence (probability) that the estimated matrix is correct.
8239     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8240     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8241     * point localization, image resolution, and the image noise.
8242     * inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to
8243     * recover pose. In the output mask only inliers which pass the cheirality check.
8244     *
8245     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8246     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
8247     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8248     *
8249     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8250     * scenario, points1 and points2 are the same input for findEssentialMat.:
8251     * <code>
8252     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8253     *     int point_count = 100;
8254     *     vector&lt;Point2f&gt; points1(point_count);
8255     *     vector&lt;Point2f&gt; points2(point_count);
8256     *
8257     *     // initialize the points here ...
8258     *     for( int i = 0; i &lt; point_count; i++ )
8259     *     {
8260     *         points1[i] = ...;
8261     *         points2[i] = ...;
8262     *     }
8263     *
8264     *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
8265     *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
8266     *
8267     *     // Output: Essential matrix, relative rotation and relative translation.
8268     *     Mat E, R, t, mask;
8269     *
8270     *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
8271     * </code>
8272     * @return automatically generated
8273     */
8274    public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method, double prob) {
8275        return recoverPose_2(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method, prob);
8276    }
8277
8278    /**
8279     * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
8280     * inliers that pass the check.
8281     *
8282     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8283     * floating-point (single or double precision).
8284     * @param points2 Array of the second image points of the same size and format as points1 .
8285     * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
8286     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8287     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
8288     * REF: calibrateCamera.
8289     * @param cameraMatrix2 Input/output camera matrix for the first camera, the same as in
8290     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8291     * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
8292     * REF: calibrateCamera.
8293     * @param E The output essential matrix.
8294     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8295     * that performs a change of basis from the first camera's coordinate system to the second camera's
8296     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8297     * described below.
8298     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8299     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8300     * length.
8301     * @param method Method for computing an essential matrix.
8302     * <ul>
8303     *   <li>
8304     *    REF: RANSAC for the RANSAC algorithm.
8305     *   </li>
8306     *   <li>
8307     *    REF: LMEDS for the LMedS algorithm.
8308     *   </li>
8309     * </ul>
8310     * confidence (probability) that the estimated matrix is correct.
8311     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8312     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8313     * point localization, image resolution, and the image noise.
8314     * inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to
8315     * recover pose. In the output mask only inliers which pass the cheirality check.
8316     *
8317     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8318     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
8319     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8320     *
8321     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8322     * scenario, points1 and points2 are the same input for findEssentialMat.:
8323     * <code>
8324     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8325     *     int point_count = 100;
8326     *     vector&lt;Point2f&gt; points1(point_count);
8327     *     vector&lt;Point2f&gt; points2(point_count);
8328     *
8329     *     // initialize the points here ...
8330     *     for( int i = 0; i &lt; point_count; i++ )
8331     *     {
8332     *         points1[i] = ...;
8333     *         points2[i] = ...;
8334     *     }
8335     *
8336     *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
8337     *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
8338     *
8339     *     // Output: Essential matrix, relative rotation and relative translation.
8340     *     Mat E, R, t, mask;
8341     *
8342     *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
8343     * </code>
8344     * @return automatically generated
8345     */
8346    public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method) {
8347        return recoverPose_3(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method);
8348    }
8349
8350    /**
8351     * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of
8352     * inliers that pass the check.
8353     *
8354     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8355     * floating-point (single or double precision).
8356     * @param points2 Array of the second image points of the same size and format as points1 .
8357     * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
8358     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8359     * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
8360     * REF: calibrateCamera.
8361     * @param cameraMatrix2 Input/output camera matrix for the first camera, the same as in
8362     * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
8363     * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in
8364     * REF: calibrateCamera.
8365     * @param E The output essential matrix.
8366     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8367     * that performs a change of basis from the first camera's coordinate system to the second camera's
8368     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8369     * described below.
8370     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8371     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8372     * length.
8373     * <ul>
8374     *   <li>
8375     *    REF: RANSAC for the RANSAC algorithm.
8376     *   </li>
8377     *   <li>
8378     *    REF: LMEDS for the LMedS algorithm.
8379     *   </li>
8380     * </ul>
8381     * confidence (probability) that the estimated matrix is correct.
8382     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
8383     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
8384     * point localization, image resolution, and the image noise.
8385     * inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to
8386     * recover pose. In the output mask only inliers which pass the cheirality check.
8387     *
8388     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8389     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
8390     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8391     *
8392     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8393     * scenario, points1 and points2 are the same input for findEssentialMat.:
8394     * <code>
8395     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8396     *     int point_count = 100;
8397     *     vector&lt;Point2f&gt; points1(point_count);
8398     *     vector&lt;Point2f&gt; points2(point_count);
8399     *
8400     *     // initialize the points here ...
8401     *     for( int i = 0; i &lt; point_count; i++ )
8402     *     {
8403     *         points1[i] = ...;
8404     *         points2[i] = ...;
8405     *     }
8406     *
8407     *     // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
8408     *     Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
8409     *
8410     *     // Output: Essential matrix, relative rotation and relative translation.
8411     *     Mat E, R, t, mask;
8412     *
8413     *     recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
8414     * </code>
8415     * @return automatically generated
8416     */
8417    public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t) {
8418        return recoverPose_4(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj);
8419    }
8420
8421
8422    //
8423    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
8424    //
8425
8426    /**
8427     * Recovers the relative camera rotation and the translation from an estimated essential
8428     * matrix and the corresponding points in two images, using chirality check. Returns the number of
8429     * inliers that pass the check.
8430     *
8431     * @param E The input essential matrix.
8432     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8433     * floating-point (single or double precision).
8434     * @param points2 Array of the second image points of the same size and format as points1 .
8435     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8436     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8437     * same camera intrinsic matrix.
8438     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8439     * that performs a change of basis from the first camera's coordinate system to the second camera's
8440     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8441     * described below.
8442     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8443     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8444     * length.
8445     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
8446     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8447     * recover pose. In the output mask only inliers which pass the chirality check.
8448     *
8449     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8450     * possible pose hypotheses by doing chirality check. The chirality check means that the
8451     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8452     *
8453     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8454     * scenario, points1 and points2 are the same input for #findEssentialMat :
8455     * <code>
8456     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8457     *     int point_count = 100;
8458     *     vector&lt;Point2f&gt; points1(point_count);
8459     *     vector&lt;Point2f&gt; points2(point_count);
8460     *
8461     *     // initialize the points here ...
8462     *     for( int i = 0; i &lt; point_count; i++ )
8463     *     {
8464     *         points1[i] = ...;
8465     *         points2[i] = ...;
8466     *     }
8467     *
8468     *     // cametra matrix with both focal lengths = 1, and principal point = (0, 0)
8469     *     Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
8470     *
8471     *     Mat E, R, t, mask;
8472     *
8473     *     E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
8474     *     recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
8475     * </code>
8476     * @return automatically generated
8477     */
8478    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, Mat mask) {
8479        return recoverPose_5(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, mask.nativeObj);
8480    }
8481
8482    /**
8483     * Recovers the relative camera rotation and the translation from an estimated essential
8484     * matrix and the corresponding points in two images, using chirality check. Returns the number of
8485     * inliers that pass the check.
8486     *
8487     * @param E The input essential matrix.
8488     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8489     * floating-point (single or double precision).
8490     * @param points2 Array of the second image points of the same size and format as points1 .
8491     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8492     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8493     * same camera intrinsic matrix.
8494     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8495     * that performs a change of basis from the first camera's coordinate system to the second camera's
8496     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8497     * described below.
8498     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8499     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8500     * length.
8501     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8502     * recover pose. In the output mask only inliers which pass the chirality check.
8503     *
8504     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
8505     * possible pose hypotheses by doing chirality check. The chirality check means that the
8506     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
8507     *
8508     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
8509     * scenario, points1 and points2 are the same input for #findEssentialMat :
8510     * <code>
8511     *     // Example. Estimation of fundamental matrix using the RANSAC algorithm
8512     *     int point_count = 100;
8513     *     vector&lt;Point2f&gt; points1(point_count);
8514     *     vector&lt;Point2f&gt; points2(point_count);
8515     *
8516     *     // initialize the points here ...
8517     *     for( int i = 0; i &lt; point_count; i++ )
8518     *     {
8519     *         points1[i] = ...;
8520     *         points2[i] = ...;
8521     *     }
8522     *
8523     *     // cametra matrix with both focal lengths = 1, and principal point = (0, 0)
8524     *     Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
8525     *
8526     *     Mat E, R, t, mask;
8527     *
8528     *     E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
8529     *     recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
8530     * </code>
8531     * @return automatically generated
8532     */
8533    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t) {
8534        return recoverPose_6(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj);
8535    }
8536
8537
8538    //
8539    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
8540    //
8541
8542    /**
8543     *
8544     * @param E The input essential matrix.
8545     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8546     * floating-point (single or double precision).
8547     * @param points2 Array of the second image points of the same size and format as points1 .
8548     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8549     * that performs a change of basis from the first camera's coordinate system to the second camera's
8550     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8551     * description below.
8552     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8553     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8554     * length.
8555     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
8556     * are feature points from cameras with same focal length and principal point.
8557     * @param pp principal point of the camera.
8558     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
8559     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8560     * recover pose. In the output mask only inliers which pass the chirality check.
8561     *
8562     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
8563     * principal point:
8564     *
8565     * \(A =
8566     * \begin{bmatrix}
8567     * f &amp; 0 &amp; x_{pp}  \\
8568     * 0 &amp; f &amp; y_{pp}  \\
8569     * 0 &amp; 0 &amp; 1
8570     * \end{bmatrix}\)
8571     * @return automatically generated
8572     */
8573    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp, Mat mask) {
8574        return recoverPose_7(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal, pp.x, pp.y, mask.nativeObj);
8575    }
8576
8577    /**
8578     *
8579     * @param E The input essential matrix.
8580     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8581     * floating-point (single or double precision).
8582     * @param points2 Array of the second image points of the same size and format as points1 .
8583     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8584     * that performs a change of basis from the first camera's coordinate system to the second camera's
8585     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8586     * description below.
8587     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8588     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8589     * length.
8590     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
8591     * are feature points from cameras with same focal length and principal point.
8592     * @param pp principal point of the camera.
8593     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8594     * recover pose. In the output mask only inliers which pass the chirality check.
8595     *
8596     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
8597     * principal point:
8598     *
8599     * \(A =
8600     * \begin{bmatrix}
8601     * f &amp; 0 &amp; x_{pp}  \\
8602     * 0 &amp; f &amp; y_{pp}  \\
8603     * 0 &amp; 0 &amp; 1
8604     * \end{bmatrix}\)
8605     * @return automatically generated
8606     */
8607    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp) {
8608        return recoverPose_8(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal, pp.x, pp.y);
8609    }
8610
8611    /**
8612     *
8613     * @param E The input essential matrix.
8614     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8615     * floating-point (single or double precision).
8616     * @param points2 Array of the second image points of the same size and format as points1 .
8617     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8618     * that performs a change of basis from the first camera's coordinate system to the second camera's
8619     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8620     * description below.
8621     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8622     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8623     * length.
8624     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
8625     * are feature points from cameras with same focal length and principal point.
8626     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8627     * recover pose. In the output mask only inliers which pass the chirality check.
8628     *
8629     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
8630     * principal point:
8631     *
8632     * \(A =
8633     * \begin{bmatrix}
8634     * f &amp; 0 &amp; x_{pp}  \\
8635     * 0 &amp; f &amp; y_{pp}  \\
8636     * 0 &amp; 0 &amp; 1
8637     * \end{bmatrix}\)
8638     * @return automatically generated
8639     */
8640    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal) {
8641        return recoverPose_9(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal);
8642    }
8643
8644    /**
8645     *
8646     * @param E The input essential matrix.
8647     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8648     * floating-point (single or double precision).
8649     * @param points2 Array of the second image points of the same size and format as points1 .
8650     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8651     * that performs a change of basis from the first camera's coordinate system to the second camera's
8652     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8653     * description below.
8654     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8655     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8656     * length.
8657     * are feature points from cameras with same focal length and principal point.
8658     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8659     * recover pose. In the output mask only inliers which pass the chirality check.
8660     *
8661     * This function differs from the one above that it computes camera intrinsic matrix from focal length and
8662     * principal point:
8663     *
8664     * \(A =
8665     * \begin{bmatrix}
8666     * f &amp; 0 &amp; x_{pp}  \\
8667     * 0 &amp; f &amp; y_{pp}  \\
8668     * 0 &amp; 0 &amp; 1
8669     * \end{bmatrix}\)
8670     * @return automatically generated
8671     */
8672    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t) {
8673        return recoverPose_10(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj);
8674    }
8675
8676
8677    //
8678    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
8679    //
8680
8681    /**
8682     *
8683     * @param E The input essential matrix.
8684     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8685     * floating-point (single or double precision).
8686     * @param points2 Array of the second image points of the same size and format as points1.
8687     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8688     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8689     * same camera intrinsic matrix.
8690     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8691     * that performs a change of basis from the first camera's coordinate system to the second camera's
8692     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8693     * description below.
8694     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8695     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8696     * length.
8697     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
8698     * points).
8699     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
8700     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8701     * recover pose. In the output mask only inliers which pass the chirality check.
8702     * @param triangulatedPoints 3D points which were reconstructed by triangulation.
8703     *
8704     * This function differs from the one above that it outputs the triangulated 3D point that are used for
8705     * the chirality check.
8706     * @return automatically generated
8707     */
8708    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask, Mat triangulatedPoints) {
8709        return recoverPose_11(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh, mask.nativeObj, triangulatedPoints.nativeObj);
8710    }
8711
8712    /**
8713     *
8714     * @param E The input essential matrix.
8715     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8716     * floating-point (single or double precision).
8717     * @param points2 Array of the second image points of the same size and format as points1.
8718     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8719     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8720     * same camera intrinsic matrix.
8721     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8722     * that performs a change of basis from the first camera's coordinate system to the second camera's
8723     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8724     * description below.
8725     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8726     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8727     * length.
8728     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
8729     * points).
8730     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
8731     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8732     * recover pose. In the output mask only inliers which pass the chirality check.
8733     *
8734     * This function differs from the one above that it outputs the triangulated 3D point that are used for
8735     * the chirality check.
8736     * @return automatically generated
8737     */
8738    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask) {
8739        return recoverPose_12(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh, mask.nativeObj);
8740    }
8741
8742    /**
8743     *
8744     * @param E The input essential matrix.
8745     * @param points1 Array of N 2D points from the first image. The point coordinates should be
8746     * floating-point (single or double precision).
8747     * @param points2 Array of the second image points of the same size and format as points1.
8748     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
8749     * Note that this function assumes that points1 and points2 are feature points from cameras with the
8750     * same camera intrinsic matrix.
8751     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
8752     * that performs a change of basis from the first camera's coordinate system to the second camera's
8753     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
8754     * description below.
8755     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
8756     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
8757     * length.
8758     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
8759     * points).
8760     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
8761     * recover pose. In the output mask only inliers which pass the chirality check.
8762     *
8763     * This function differs from the one above that it outputs the triangulated 3D point that are used for
8764     * the chirality check.
8765     * @return automatically generated
8766     */
8767    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh) {
8768        return recoverPose_13(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh);
8769    }
8770
8771
8772    //
8773    // C++:  void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
8774    //
8775
8776    /**
8777     * For points in an image of a stereo pair, computes the corresponding epilines in the other image.
8778     *
8779     * @param points Input points. \(N \times 1\) or \(1 \times N\) matrix of type CV_32FC2 or
8780     * vector&lt;Point2f&gt; .
8781     * @param whichImage Index of the image (1 or 2) that contains the points .
8782     * @param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify .
8783     * @param lines Output vector of the epipolar lines corresponding to the points in the other image.
8784     * Each line \(ax + by + c=0\) is encoded by 3 numbers \((a, b, c)\) .
8785     *
8786     * For every point in one of the two images of a stereo pair, the function finds the equation of the
8787     * corresponding epipolar line in the other image.
8788     *
8789     * From the fundamental matrix definition (see #findFundamentalMat ), line \(l^{(2)}_i\) in the second
8790     * image for the point \(p^{(1)}_i\) in the first image (when whichImage=1 ) is computed as:
8791     *
8792     * \(l^{(2)}_i = F p^{(1)}_i\)
8793     *
8794     * And vice versa, when whichImage=2, \(l^{(1)}_i\) is computed from \(p^{(2)}_i\) as:
8795     *
8796     * \(l^{(1)}_i = F^T p^{(2)}_i\)
8797     *
8798     * Line coefficients are defined up to a scale. They are normalized so that \(a_i^2+b_i^2=1\) .
8799     */
8800    public static void computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat lines) {
8801        computeCorrespondEpilines_0(points.nativeObj, whichImage, F.nativeObj, lines.nativeObj);
8802    }
8803
8804
8805    //
8806    // C++:  void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
8807    //
8808
8809    /**
8810     * This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
8811     * their observations with a stereo camera.
8812     *
8813     * @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
8814     * given in the world's coordinate system into the first image.
8815     * @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
8816     * given in the world's coordinate system into the second image.
8817     * @param projPoints1 2xN array of feature points in the first image. In the case of the c++ version,
8818     * it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1.
8819     * @param projPoints2 2xN array of corresponding points in the second image. In the case of the c++
8820     * version, it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1.
8821     * @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
8822     * returned in the world's coordinate system.
8823     *
8824     * <b>Note:</b>
8825     *    Keep in mind that all input data should be of float type in order for this function to work.
8826     *
8827     * <b>Note:</b>
8828     *    If the projection matrices from REF: stereoRectify are used, then the returned points are
8829     *    represented in the first camera's rectified coordinate system.
8830     *
8831     * SEE:
8832     *    reprojectImageTo3D
8833     */
8834    public static void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat points4D) {
8835        triangulatePoints_0(projMatr1.nativeObj, projMatr2.nativeObj, projPoints1.nativeObj, projPoints2.nativeObj, points4D.nativeObj);
8836    }
8837
8838
8839    //
8840    // C++:  void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
8841    //
8842
8843    /**
8844     * Refines coordinates of corresponding points.
8845     *
8846     * @param F 3x3 fundamental matrix.
8847     * @param points1 1xN array containing the first set of points.
8848     * @param points2 1xN array containing the second set of points.
8849     * @param newPoints1 The optimized points1.
8850     * @param newPoints2 The optimized points2.
8851     *
8852     * The function implements the Optimal Triangulation Method (see Multiple View Geometry CITE: HartleyZ00 for details).
8853     * For each given point correspondence points1[i] &lt;-&gt; points2[i], and a fundamental matrix F, it
8854     * computes the corrected correspondences newPoints1[i] &lt;-&gt; newPoints2[i] that minimize the geometric
8855     * error \(d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\) (where \(d(a,b)\) is the
8856     * geometric distance between points \(a\) and \(b\) ) subject to the epipolar constraint
8857     * \(newPoints2^T \cdot F \cdot newPoints1 = 0\) .
8858     */
8859    public static void correctMatches(Mat F, Mat points1, Mat points2, Mat newPoints1, Mat newPoints2) {
8860        correctMatches_0(F.nativeObj, points1.nativeObj, points2.nativeObj, newPoints1.nativeObj, newPoints2.nativeObj);
8861    }
8862
8863
8864    //
8865    // C++:  void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
8866    //
8867
8868    /**
8869     * Filters off small noise blobs (speckles) in the disparity map
8870     *
8871     * @param img The input 16-bit signed disparity image
8872     * @param newVal The disparity value used to paint-off the speckles
8873     * @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
8874     * affected by the algorithm
8875     * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
8876     * blob. Note that since StereoBM, StereoSGBM and may be other algorithms return a fixed-point
8877     * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
8878     * account when specifying this parameter value.
8879     * @param buf The optional temporary buffer to avoid memory allocation within the function.
8880     */
8881    public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff, Mat buf) {
8882        filterSpeckles_0(img.nativeObj, newVal, maxSpeckleSize, maxDiff, buf.nativeObj);
8883    }
8884
8885    /**
8886     * Filters off small noise blobs (speckles) in the disparity map
8887     *
8888     * @param img The input 16-bit signed disparity image
8889     * @param newVal The disparity value used to paint-off the speckles
8890     * @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
8891     * affected by the algorithm
8892     * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
8893     * blob. Note that since StereoBM, StereoSGBM and may be other algorithms return a fixed-point
8894     * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
8895     * account when specifying this parameter value.
8896     */
8897    public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff) {
8898        filterSpeckles_1(img.nativeObj, newVal, maxSpeckleSize, maxDiff);
8899    }
8900
8901
8902    //
8903    // C++:  Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
8904    //
8905
8906    public static Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize) {
8907        return new Rect(getValidDisparityROI_0(roi1.x, roi1.y, roi1.width, roi1.height, roi2.x, roi2.y, roi2.width, roi2.height, minDisparity, numberOfDisparities, blockSize));
8908    }
8909
8910
8911    //
8912    // C++:  void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
8913    //
8914
8915    public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp) {
8916        validateDisparity_0(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities, disp12MaxDisp);
8917    }
8918
8919    public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities) {
8920        validateDisparity_1(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities);
8921    }
8922
8923
8924    //
8925    // C++:  void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
8926    //
8927
8928    /**
8929     * Reprojects a disparity image to 3D space.
8930     *
8931     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
8932     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
8933     * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
8934     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
8935     * being used here.
8936     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
8937     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
8938     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
8939     * camera's rectified coordinate system.
8940     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
8941     * REF: stereoRectify.
8942     * @param handleMissingValues Indicates, whether the function should handle missing values (i.e.
8943     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
8944     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
8945     * to 3D points with a very large Z value (currently set to 10000).
8946     * @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
8947     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
8948     *
8949     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
8950     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
8951     * computes:
8952     *
8953     * \(\begin{bmatrix}
8954     * X \\
8955     * Y \\
8956     * Z \\
8957     * W
8958     * \end{bmatrix} = Q \begin{bmatrix}
8959     * x \\
8960     * y \\
8961     * \texttt{disparity} (x,y) \\
8962     * z
8963     * \end{bmatrix}.\)
8964     *
8965     * SEE:
8966     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
8967     */
8968    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues, int ddepth) {
8969        reprojectImageTo3D_0(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues, ddepth);
8970    }
8971
8972    /**
8973     * Reprojects a disparity image to 3D space.
8974     *
8975     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
8976     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
8977     * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
8978     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
8979     * being used here.
8980     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
8981     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
8982     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
8983     * camera's rectified coordinate system.
8984     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
8985     * REF: stereoRectify.
8986     * @param handleMissingValues Indicates, whether the function should handle missing values (i.e.
8987     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
8988     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
8989     * to 3D points with a very large Z value (currently set to 10000).
8990     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
8991     *
8992     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
8993     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
8994     * computes:
8995     *
8996     * \(\begin{bmatrix}
8997     * X \\
8998     * Y \\
8999     * Z \\
9000     * W
9001     * \end{bmatrix} = Q \begin{bmatrix}
9002     * x \\
9003     * y \\
9004     * \texttt{disparity} (x,y) \\
9005     * z
9006     * \end{bmatrix}.\)
9007     *
9008     * SEE:
9009     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
9010     */
9011    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues) {
9012        reprojectImageTo3D_1(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues);
9013    }
9014
9015    /**
9016     * Reprojects a disparity image to 3D space.
9017     *
9018     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
9019     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
9020     * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
9021     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
9022     * being used here.
9023     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
9024     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
9025     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
9026     * camera's rectified coordinate system.
9027     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
9028     * REF: stereoRectify.
9029     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
9030     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
9031     * to 3D points with a very large Z value (currently set to 10000).
9032     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
9033     *
9034     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
9035     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
9036     * computes:
9037     *
9038     * \(\begin{bmatrix}
9039     * X \\
9040     * Y \\
9041     * Z \\
9042     * W
9043     * \end{bmatrix} = Q \begin{bmatrix}
9044     * x \\
9045     * y \\
9046     * \texttt{disparity} (x,y) \\
9047     * z
9048     * \end{bmatrix}.\)
9049     *
9050     * SEE:
9051     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
9052     */
9053    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q) {
9054        reprojectImageTo3D_2(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj);
9055    }
9056
9057
9058    //
9059    // C++:  double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
9060    //
9061
9062    /**
9063     * Calculates the Sampson Distance between two points.
9064     *
9065     * The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as:
9066     * \(
9067     * sd( \texttt{pt1} , \texttt{pt2} )=
9068     * \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
9069     * {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
9070     * ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
9071     * ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
9072     * ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
9073     * \)
9074     * The fundamental matrix may be calculated using the #findFundamentalMat function. See CITE: HartleyZ00 11.4.3 for details.
9075     * @param pt1 first homogeneous 2d point
9076     * @param pt2 second homogeneous 2d point
9077     * @param F fundamental matrix
9078     * @return The computed Sampson distance.
9079     */
9080    public static double sampsonDistance(Mat pt1, Mat pt2, Mat F) {
9081        return sampsonDistance_0(pt1.nativeObj, pt2.nativeObj, F.nativeObj);
9082    }
9083
9084
9085    //
9086    // C++:  int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
9087    //
9088
9089    /**
9090     * Computes an optimal affine transformation between two 3D point sets.
9091     *
9092     * It computes
9093     * \(
9094     * \begin{bmatrix}
9095     * x\\
9096     * y\\
9097     * z\\
9098     * \end{bmatrix}
9099     * =
9100     * \begin{bmatrix}
9101     * a_{11} &amp; a_{12} &amp; a_{13}\\
9102     * a_{21} &amp; a_{22} &amp; a_{23}\\
9103     * a_{31} &amp; a_{32} &amp; a_{33}\\
9104     * \end{bmatrix}
9105     * \begin{bmatrix}
9106     * X\\
9107     * Y\\
9108     * Z\\
9109     * \end{bmatrix}
9110     * +
9111     * \begin{bmatrix}
9112     * b_1\\
9113     * b_2\\
9114     * b_3\\
9115     * \end{bmatrix}
9116     * \)
9117     *
9118     * @param src First input 3D point set containing \((X,Y,Z)\).
9119     * @param dst Second input 3D point set containing \((x,y,z)\).
9120     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
9121     * \(
9122     * \begin{bmatrix}
9123     * a_{11} &amp; a_{12} &amp; a_{13} &amp; b_1\\
9124     * a_{21} &amp; a_{22} &amp; a_{23} &amp; b_2\\
9125     * a_{31} &amp; a_{32} &amp; a_{33} &amp; b_3\\
9126     * \end{bmatrix}
9127     * \)
9128     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9129     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9130     * an inlier.
9131     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
9132     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9133     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9134     *
9135     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
9136     * RANSAC algorithm.
9137     * @return automatically generated
9138     */
9139    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) {
9140        return estimateAffine3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence);
9141    }
9142
9143    /**
9144     * Computes an optimal affine transformation between two 3D point sets.
9145     *
9146     * It computes
9147     * \(
9148     * \begin{bmatrix}
9149     * x\\
9150     * y\\
9151     * z\\
9152     * \end{bmatrix}
9153     * =
9154     * \begin{bmatrix}
9155     * a_{11} &amp; a_{12} &amp; a_{13}\\
9156     * a_{21} &amp; a_{22} &amp; a_{23}\\
9157     * a_{31} &amp; a_{32} &amp; a_{33}\\
9158     * \end{bmatrix}
9159     * \begin{bmatrix}
9160     * X\\
9161     * Y\\
9162     * Z\\
9163     * \end{bmatrix}
9164     * +
9165     * \begin{bmatrix}
9166     * b_1\\
9167     * b_2\\
9168     * b_3\\
9169     * \end{bmatrix}
9170     * \)
9171     *
9172     * @param src First input 3D point set containing \((X,Y,Z)\).
9173     * @param dst Second input 3D point set containing \((x,y,z)\).
9174     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
9175     * \(
9176     * \begin{bmatrix}
9177     * a_{11} &amp; a_{12} &amp; a_{13} &amp; b_1\\
9178     * a_{21} &amp; a_{22} &amp; a_{23} &amp; b_2\\
9179     * a_{31} &amp; a_{32} &amp; a_{33} &amp; b_3\\
9180     * \end{bmatrix}
9181     * \)
9182     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9183     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9184     * an inlier.
9185     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9186     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9187     *
9188     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
9189     * RANSAC algorithm.
9190     * @return automatically generated
9191     */
9192    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold) {
9193        return estimateAffine3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold);
9194    }
9195
9196    /**
9197     * Computes an optimal affine transformation between two 3D point sets.
9198     *
9199     * It computes
9200     * \(
9201     * \begin{bmatrix}
9202     * x\\
9203     * y\\
9204     * z\\
9205     * \end{bmatrix}
9206     * =
9207     * \begin{bmatrix}
9208     * a_{11} &amp; a_{12} &amp; a_{13}\\
9209     * a_{21} &amp; a_{22} &amp; a_{23}\\
9210     * a_{31} &amp; a_{32} &amp; a_{33}\\
9211     * \end{bmatrix}
9212     * \begin{bmatrix}
9213     * X\\
9214     * Y\\
9215     * Z\\
9216     * \end{bmatrix}
9217     * +
9218     * \begin{bmatrix}
9219     * b_1\\
9220     * b_2\\
9221     * b_3\\
9222     * \end{bmatrix}
9223     * \)
9224     *
9225     * @param src First input 3D point set containing \((X,Y,Z)\).
9226     * @param dst Second input 3D point set containing \((x,y,z)\).
9227     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
9228     * \(
9229     * \begin{bmatrix}
9230     * a_{11} &amp; a_{12} &amp; a_{13} &amp; b_1\\
9231     * a_{21} &amp; a_{22} &amp; a_{23} &amp; b_2\\
9232     * a_{31} &amp; a_{32} &amp; a_{33} &amp; b_3\\
9233     * \end{bmatrix}
9234     * \)
9235     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9236     * an inlier.
9237     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9238     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9239     *
9240     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
9241     * RANSAC algorithm.
9242     * @return automatically generated
9243     */
9244    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers) {
9245        return estimateAffine3D_2(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj);
9246    }
9247
9248
9249    //
9250    // C++:  Mat cv::estimateAffine3D(Mat src, Mat dst, double* scale = nullptr, bool force_rotation = true)
9251    //
9252
9253    /**
9254     * Computes an optimal affine transformation between two 3D point sets.
9255     *
9256     * It computes \(R,s,t\) minimizing \(\sum{i} dst_i - c \cdot R \cdot src_i \)
9257     * where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
9258     * scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
9259     * The estimated affine transform has a homogeneous scale which is a subclass of affine
9260     * transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
9261     * points each.
9262     *
9263     * @param src First input 3D point set.
9264     * @param dst Second input 3D point set.
9265     * @param scale If null is passed, the scale parameter c will be assumed to be 1.0.
9266     * Else the pointed-to variable will be set to the optimal scale.
9267     * @param force_rotation If true, the returned rotation will never be a reflection.
9268     * This might be unwanted, e.g. when optimizing a transform between a right- and a
9269     * left-handed coordinate system.
9270     * @return 3D affine transformation matrix \(3 \times 4\) of the form
9271     * \(T =
9272     * \begin{bmatrix}
9273     * R &amp; t\\
9274     * \end{bmatrix}
9275     * \)
9276     */
9277    public static Mat estimateAffine3D(Mat src, Mat dst, double[] scale, boolean force_rotation) {
9278        double[] scale_out = new double[1];
9279        Mat retVal = new Mat(estimateAffine3D_3(src.nativeObj, dst.nativeObj, scale_out, force_rotation));
9280        if(scale!=null) scale[0] = (double)scale_out[0];
9281        return retVal;
9282    }
9283
9284    /**
9285     * Computes an optimal affine transformation between two 3D point sets.
9286     *
9287     * It computes \(R,s,t\) minimizing \(\sum{i} dst_i - c \cdot R \cdot src_i \)
9288     * where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
9289     * scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
9290     * The estimated affine transform has a homogeneous scale which is a subclass of affine
9291     * transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
9292     * points each.
9293     *
9294     * @param src First input 3D point set.
9295     * @param dst Second input 3D point set.
9296     * @param scale If null is passed, the scale parameter c will be assumed to be 1.0.
9297     * Else the pointed-to variable will be set to the optimal scale.
9298     * This might be unwanted, e.g. when optimizing a transform between a right- and a
9299     * left-handed coordinate system.
9300     * @return 3D affine transformation matrix \(3 \times 4\) of the form
9301     * \(T =
9302     * \begin{bmatrix}
9303     * R &amp; t\\
9304     * \end{bmatrix}
9305     * \)
9306     */
9307    public static Mat estimateAffine3D(Mat src, Mat dst, double[] scale) {
9308        double[] scale_out = new double[1];
9309        Mat retVal = new Mat(estimateAffine3D_4(src.nativeObj, dst.nativeObj, scale_out));
9310        if(scale!=null) scale[0] = (double)scale_out[0];
9311        return retVal;
9312    }
9313
9314    /**
9315     * Computes an optimal affine transformation between two 3D point sets.
9316     *
9317     * It computes \(R,s,t\) minimizing \(\sum{i} dst_i - c \cdot R \cdot src_i \)
9318     * where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
9319     * scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
9320     * The estimated affine transform has a homogeneous scale which is a subclass of affine
9321     * transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
9322     * points each.
9323     *
9324     * @param src First input 3D point set.
9325     * @param dst Second input 3D point set.
9326     * Else the pointed-to variable will be set to the optimal scale.
9327     * This might be unwanted, e.g. when optimizing a transform between a right- and a
9328     * left-handed coordinate system.
9329     * @return 3D affine transformation matrix \(3 \times 4\) of the form
9330     * \(T =
9331     * \begin{bmatrix}
9332     * R &amp; t\\
9333     * \end{bmatrix}
9334     * \)
9335     */
9336    public static Mat estimateAffine3D(Mat src, Mat dst) {
9337        return new Mat(estimateAffine3D_5(src.nativeObj, dst.nativeObj));
9338    }
9339
9340
9341    //
9342    // C++:  int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
9343    //
9344
9345    /**
9346     * Computes an optimal translation between two 3D point sets.
9347     *
9348     * It computes
9349     * \(
9350     * \begin{bmatrix}
9351     * x\\
9352     * y\\
9353     * z\\
9354     * \end{bmatrix}
9355     * =
9356     * \begin{bmatrix}
9357     * X\\
9358     * Y\\
9359     * Z\\
9360     * \end{bmatrix}
9361     * +
9362     * \begin{bmatrix}
9363     * b_1\\
9364     * b_2\\
9365     * b_3\\
9366     * \end{bmatrix}
9367     * \)
9368     *
9369     * @param src First input 3D point set containing \((X,Y,Z)\).
9370     * @param dst Second input 3D point set containing \((x,y,z)\).
9371     * @param out Output 3D translation vector \(3 \times 1\) of the form
9372     * \(
9373     * \begin{bmatrix}
9374     * b_1 \\
9375     * b_2 \\
9376     * b_3 \\
9377     * \end{bmatrix}
9378     * \)
9379     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9380     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9381     * an inlier.
9382     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
9383     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9384     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9385     *
9386     * The function estimates an optimal 3D translation between two 3D point sets using the
9387     * RANSAC algorithm.
9388     *
9389     * @return automatically generated
9390     */
9391    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) {
9392        return estimateTranslation3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence);
9393    }
9394
9395    /**
9396     * Computes an optimal translation between two 3D point sets.
9397     *
9398     * It computes
9399     * \(
9400     * \begin{bmatrix}
9401     * x\\
9402     * y\\
9403     * z\\
9404     * \end{bmatrix}
9405     * =
9406     * \begin{bmatrix}
9407     * X\\
9408     * Y\\
9409     * Z\\
9410     * \end{bmatrix}
9411     * +
9412     * \begin{bmatrix}
9413     * b_1\\
9414     * b_2\\
9415     * b_3\\
9416     * \end{bmatrix}
9417     * \)
9418     *
9419     * @param src First input 3D point set containing \((X,Y,Z)\).
9420     * @param dst Second input 3D point set containing \((x,y,z)\).
9421     * @param out Output 3D translation vector \(3 \times 1\) of the form
9422     * \(
9423     * \begin{bmatrix}
9424     * b_1 \\
9425     * b_2 \\
9426     * b_3 \\
9427     * \end{bmatrix}
9428     * \)
9429     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9430     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
9431     * an inlier.
9432     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9433     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9434     *
9435     * The function estimates an optimal 3D translation between two 3D point sets using the
9436     * RANSAC algorithm.
9437     *
9438     * @return automatically generated
9439     */
9440    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold) {
9441        return estimateTranslation3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold);
9442    }
9443
9444    /**
9445     * Computes an optimal translation between two 3D point sets.
9446     *
9447     * It computes
9448     * \(
9449     * \begin{bmatrix}
9450     * x\\
9451     * y\\
9452     * z\\
9453     * \end{bmatrix}
9454     * =
9455     * \begin{bmatrix}
9456     * X\\
9457     * Y\\
9458     * Z\\
9459     * \end{bmatrix}
9460     * +
9461     * \begin{bmatrix}
9462     * b_1\\
9463     * b_2\\
9464     * b_3\\
9465     * \end{bmatrix}
9466     * \)
9467     *
9468     * @param src First input 3D point set containing \((X,Y,Z)\).
9469     * @param dst Second input 3D point set containing \((x,y,z)\).
9470     * @param out Output 3D translation vector \(3 \times 1\) of the form
9471     * \(
9472     * \begin{bmatrix}
9473     * b_1 \\
9474     * b_2 \\
9475     * b_3 \\
9476     * \end{bmatrix}
9477     * \)
9478     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9479     * an inlier.
9480     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9481     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9482     *
9483     * The function estimates an optimal 3D translation between two 3D point sets using the
9484     * RANSAC algorithm.
9485     *
9486     * @return automatically generated
9487     */
9488    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers) {
9489        return estimateTranslation3D_2(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj);
9490    }
9491
9492
9493    //
9494    // C++:  Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
9495    //
9496
9497    /**
9498     * Computes an optimal affine transformation between two 2D point sets.
9499     *
9500     * It computes
9501     * \(
9502     * \begin{bmatrix}
9503     * x\\
9504     * y\\
9505     * \end{bmatrix}
9506     * =
9507     * \begin{bmatrix}
9508     * a_{11} &amp; a_{12}\\
9509     * a_{21} &amp; a_{22}\\
9510     * \end{bmatrix}
9511     * \begin{bmatrix}
9512     * X\\
9513     * Y\\
9514     * \end{bmatrix}
9515     * +
9516     * \begin{bmatrix}
9517     * b_1\\
9518     * b_2\\
9519     * \end{bmatrix}
9520     * \)
9521     *
9522     * @param from First input 2D point set containing \((X,Y)\).
9523     * @param to Second input 2D point set containing \((x,y)\).
9524     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9525     * @param method Robust method used to compute transformation. The following methods are possible:
9526     * <ul>
9527     *   <li>
9528     *    REF: RANSAC - RANSAC-based robust method
9529     *   </li>
9530     *   <li>
9531     *    REF: LMEDS - Least-Median robust method
9532     * RANSAC is the default method.
9533     *   </li>
9534     * </ul>
9535     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
9536     * a point as an inlier. Applies only to RANSAC.
9537     * @param maxIters The maximum number of robust method iterations.
9538     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
9539     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9540     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9541     * @param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
9542     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9543     *
9544     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9545     * could not be estimated. The returned matrix has the following form:
9546     * \(
9547     * \begin{bmatrix}
9548     * a_{11} &amp; a_{12} &amp; b_1\\
9549     * a_{21} &amp; a_{22} &amp; b_2\\
9550     * \end{bmatrix}
9551     * \)
9552     *
9553     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9554     * selected robust algorithm.
9555     *
9556     * The computed transformation is then refined further (using only inliers) with the
9557     * Levenberg-Marquardt method to reduce the re-projection error even more.
9558     *
9559     * <b>Note:</b>
9560     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9561     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9562     * correctly only when there are more than 50% of inliers.
9563     *
9564     * SEE: estimateAffinePartial2D, getAffineTransform
9565     */
9566    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters) {
9567        return new Mat(estimateAffine2D_0(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence, refineIters));
9568    }
9569
9570    /**
9571     * Computes an optimal affine transformation between two 2D point sets.
9572     *
9573     * It computes
9574     * \(
9575     * \begin{bmatrix}
9576     * x\\
9577     * y\\
9578     * \end{bmatrix}
9579     * =
9580     * \begin{bmatrix}
9581     * a_{11} &amp; a_{12}\\
9582     * a_{21} &amp; a_{22}\\
9583     * \end{bmatrix}
9584     * \begin{bmatrix}
9585     * X\\
9586     * Y\\
9587     * \end{bmatrix}
9588     * +
9589     * \begin{bmatrix}
9590     * b_1\\
9591     * b_2\\
9592     * \end{bmatrix}
9593     * \)
9594     *
9595     * @param from First input 2D point set containing \((X,Y)\).
9596     * @param to Second input 2D point set containing \((x,y)\).
9597     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9598     * @param method Robust method used to compute transformation. The following methods are possible:
9599     * <ul>
9600     *   <li>
9601     *    REF: RANSAC - RANSAC-based robust method
9602     *   </li>
9603     *   <li>
9604     *    REF: LMEDS - Least-Median robust method
9605     * RANSAC is the default method.
9606     *   </li>
9607     * </ul>
9608     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
9609     * a point as an inlier. Applies only to RANSAC.
9610     * @param maxIters The maximum number of robust method iterations.
9611     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
9612     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9613     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9614     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9615     *
9616     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9617     * could not be estimated. The returned matrix has the following form:
9618     * \(
9619     * \begin{bmatrix}
9620     * a_{11} &amp; a_{12} &amp; b_1\\
9621     * a_{21} &amp; a_{22} &amp; b_2\\
9622     * \end{bmatrix}
9623     * \)
9624     *
9625     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9626     * selected robust algorithm.
9627     *
9628     * The computed transformation is then refined further (using only inliers) with the
9629     * Levenberg-Marquardt method to reduce the re-projection error even more.
9630     *
9631     * <b>Note:</b>
9632     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9633     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9634     * correctly only when there are more than 50% of inliers.
9635     *
9636     * SEE: estimateAffinePartial2D, getAffineTransform
9637     */
9638    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence) {
9639        return new Mat(estimateAffine2D_1(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence));
9640    }
9641
9642    /**
9643     * Computes an optimal affine transformation between two 2D point sets.
9644     *
9645     * It computes
9646     * \(
9647     * \begin{bmatrix}
9648     * x\\
9649     * y\\
9650     * \end{bmatrix}
9651     * =
9652     * \begin{bmatrix}
9653     * a_{11} &amp; a_{12}\\
9654     * a_{21} &amp; a_{22}\\
9655     * \end{bmatrix}
9656     * \begin{bmatrix}
9657     * X\\
9658     * Y\\
9659     * \end{bmatrix}
9660     * +
9661     * \begin{bmatrix}
9662     * b_1\\
9663     * b_2\\
9664     * \end{bmatrix}
9665     * \)
9666     *
9667     * @param from First input 2D point set containing \((X,Y)\).
9668     * @param to Second input 2D point set containing \((x,y)\).
9669     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9670     * @param method Robust method used to compute transformation. The following methods are possible:
9671     * <ul>
9672     *   <li>
9673     *    REF: RANSAC - RANSAC-based robust method
9674     *   </li>
9675     *   <li>
9676     *    REF: LMEDS - Least-Median robust method
9677     * RANSAC is the default method.
9678     *   </li>
9679     * </ul>
9680     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
9681     * a point as an inlier. Applies only to RANSAC.
9682     * @param maxIters The maximum number of robust method iterations.
9683     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9684     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9685     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9686     *
9687     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9688     * could not be estimated. The returned matrix has the following form:
9689     * \(
9690     * \begin{bmatrix}
9691     * a_{11} &amp; a_{12} &amp; b_1\\
9692     * a_{21} &amp; a_{22} &amp; b_2\\
9693     * \end{bmatrix}
9694     * \)
9695     *
9696     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9697     * selected robust algorithm.
9698     *
9699     * The computed transformation is then refined further (using only inliers) with the
9700     * Levenberg-Marquardt method to reduce the re-projection error even more.
9701     *
9702     * <b>Note:</b>
9703     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9704     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9705     * correctly only when there are more than 50% of inliers.
9706     *
9707     * SEE: estimateAffinePartial2D, getAffineTransform
9708     */
9709    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters) {
9710        return new Mat(estimateAffine2D_2(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters));
9711    }
9712
9713    /**
9714     * Computes an optimal affine transformation between two 2D point sets.
9715     *
9716     * It computes
9717     * \(
9718     * \begin{bmatrix}
9719     * x\\
9720     * y\\
9721     * \end{bmatrix}
9722     * =
9723     * \begin{bmatrix}
9724     * a_{11} &amp; a_{12}\\
9725     * a_{21} &amp; a_{22}\\
9726     * \end{bmatrix}
9727     * \begin{bmatrix}
9728     * X\\
9729     * Y\\
9730     * \end{bmatrix}
9731     * +
9732     * \begin{bmatrix}
9733     * b_1\\
9734     * b_2\\
9735     * \end{bmatrix}
9736     * \)
9737     *
9738     * @param from First input 2D point set containing \((X,Y)\).
9739     * @param to Second input 2D point set containing \((x,y)\).
9740     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9741     * @param method Robust method used to compute transformation. The following methods are possible:
9742     * <ul>
9743     *   <li>
9744     *    REF: RANSAC - RANSAC-based robust method
9745     *   </li>
9746     *   <li>
9747     *    REF: LMEDS - Least-Median robust method
9748     * RANSAC is the default method.
9749     *   </li>
9750     * </ul>
9751     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
9752     * a point as an inlier. Applies only to RANSAC.
9753     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9754     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9755     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9756     *
9757     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9758     * could not be estimated. The returned matrix has the following form:
9759     * \(
9760     * \begin{bmatrix}
9761     * a_{11} &amp; a_{12} &amp; b_1\\
9762     * a_{21} &amp; a_{22} &amp; b_2\\
9763     * \end{bmatrix}
9764     * \)
9765     *
9766     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9767     * selected robust algorithm.
9768     *
9769     * The computed transformation is then refined further (using only inliers) with the
9770     * Levenberg-Marquardt method to reduce the re-projection error even more.
9771     *
9772     * <b>Note:</b>
9773     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9774     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9775     * correctly only when there are more than 50% of inliers.
9776     *
9777     * SEE: estimateAffinePartial2D, getAffineTransform
9778     */
9779    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold) {
9780        return new Mat(estimateAffine2D_3(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold));
9781    }
9782
9783    /**
9784     * Computes an optimal affine transformation between two 2D point sets.
9785     *
9786     * It computes
9787     * \(
9788     * \begin{bmatrix}
9789     * x\\
9790     * y\\
9791     * \end{bmatrix}
9792     * =
9793     * \begin{bmatrix}
9794     * a_{11} &amp; a_{12}\\
9795     * a_{21} &amp; a_{22}\\
9796     * \end{bmatrix}
9797     * \begin{bmatrix}
9798     * X\\
9799     * Y\\
9800     * \end{bmatrix}
9801     * +
9802     * \begin{bmatrix}
9803     * b_1\\
9804     * b_2\\
9805     * \end{bmatrix}
9806     * \)
9807     *
9808     * @param from First input 2D point set containing \((X,Y)\).
9809     * @param to Second input 2D point set containing \((x,y)\).
9810     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9811     * @param method Robust method used to compute transformation. The following methods are possible:
9812     * <ul>
9813     *   <li>
9814     *    REF: RANSAC - RANSAC-based robust method
9815     *   </li>
9816     *   <li>
9817     *    REF: LMEDS - Least-Median robust method
9818     * RANSAC is the default method.
9819     *   </li>
9820     * </ul>
9821     * a point as an inlier. Applies only to RANSAC.
9822     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9823     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9824     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9825     *
9826     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9827     * could not be estimated. The returned matrix has the following form:
9828     * \(
9829     * \begin{bmatrix}
9830     * a_{11} &amp; a_{12} &amp; b_1\\
9831     * a_{21} &amp; a_{22} &amp; b_2\\
9832     * \end{bmatrix}
9833     * \)
9834     *
9835     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9836     * selected robust algorithm.
9837     *
9838     * The computed transformation is then refined further (using only inliers) with the
9839     * Levenberg-Marquardt method to reduce the re-projection error even more.
9840     *
9841     * <b>Note:</b>
9842     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9843     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9844     * correctly only when there are more than 50% of inliers.
9845     *
9846     * SEE: estimateAffinePartial2D, getAffineTransform
9847     */
9848    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method) {
9849        return new Mat(estimateAffine2D_4(from.nativeObj, to.nativeObj, inliers.nativeObj, method));
9850    }
9851
9852    /**
9853     * Computes an optimal affine transformation between two 2D point sets.
9854     *
9855     * It computes
9856     * \(
9857     * \begin{bmatrix}
9858     * x\\
9859     * y\\
9860     * \end{bmatrix}
9861     * =
9862     * \begin{bmatrix}
9863     * a_{11} &amp; a_{12}\\
9864     * a_{21} &amp; a_{22}\\
9865     * \end{bmatrix}
9866     * \begin{bmatrix}
9867     * X\\
9868     * Y\\
9869     * \end{bmatrix}
9870     * +
9871     * \begin{bmatrix}
9872     * b_1\\
9873     * b_2\\
9874     * \end{bmatrix}
9875     * \)
9876     *
9877     * @param from First input 2D point set containing \((X,Y)\).
9878     * @param to Second input 2D point set containing \((x,y)\).
9879     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
9880     * <ul>
9881     *   <li>
9882     *    REF: RANSAC - RANSAC-based robust method
9883     *   </li>
9884     *   <li>
9885     *    REF: LMEDS - Least-Median robust method
9886     * RANSAC is the default method.
9887     *   </li>
9888     * </ul>
9889     * a point as an inlier. Applies only to RANSAC.
9890     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9891     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9892     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9893     *
9894     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9895     * could not be estimated. The returned matrix has the following form:
9896     * \(
9897     * \begin{bmatrix}
9898     * a_{11} &amp; a_{12} &amp; b_1\\
9899     * a_{21} &amp; a_{22} &amp; b_2\\
9900     * \end{bmatrix}
9901     * \)
9902     *
9903     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9904     * selected robust algorithm.
9905     *
9906     * The computed transformation is then refined further (using only inliers) with the
9907     * Levenberg-Marquardt method to reduce the re-projection error even more.
9908     *
9909     * <b>Note:</b>
9910     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9911     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9912     * correctly only when there are more than 50% of inliers.
9913     *
9914     * SEE: estimateAffinePartial2D, getAffineTransform
9915     */
9916    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers) {
9917        return new Mat(estimateAffine2D_5(from.nativeObj, to.nativeObj, inliers.nativeObj));
9918    }
9919
9920    /**
9921     * Computes an optimal affine transformation between two 2D point sets.
9922     *
9923     * It computes
9924     * \(
9925     * \begin{bmatrix}
9926     * x\\
9927     * y\\
9928     * \end{bmatrix}
9929     * =
9930     * \begin{bmatrix}
9931     * a_{11} &amp; a_{12}\\
9932     * a_{21} &amp; a_{22}\\
9933     * \end{bmatrix}
9934     * \begin{bmatrix}
9935     * X\\
9936     * Y\\
9937     * \end{bmatrix}
9938     * +
9939     * \begin{bmatrix}
9940     * b_1\\
9941     * b_2\\
9942     * \end{bmatrix}
9943     * \)
9944     *
9945     * @param from First input 2D point set containing \((X,Y)\).
9946     * @param to Second input 2D point set containing \((x,y)\).
9947     * <ul>
9948     *   <li>
9949     *    REF: RANSAC - RANSAC-based robust method
9950     *   </li>
9951     *   <li>
9952     *    REF: LMEDS - Least-Median robust method
9953     * RANSAC is the default method.
9954     *   </li>
9955     * </ul>
9956     * a point as an inlier. Applies only to RANSAC.
9957     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
9958     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
9959     * Passing 0 will disable refining, so the output matrix will be output of robust method.
9960     *
9961     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
9962     * could not be estimated. The returned matrix has the following form:
9963     * \(
9964     * \begin{bmatrix}
9965     * a_{11} &amp; a_{12} &amp; b_1\\
9966     * a_{21} &amp; a_{22} &amp; b_2\\
9967     * \end{bmatrix}
9968     * \)
9969     *
9970     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
9971     * selected robust algorithm.
9972     *
9973     * The computed transformation is then refined further (using only inliers) with the
9974     * Levenberg-Marquardt method to reduce the re-projection error even more.
9975     *
9976     * <b>Note:</b>
9977     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
9978     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
9979     * correctly only when there are more than 50% of inliers.
9980     *
9981     * SEE: estimateAffinePartial2D, getAffineTransform
9982     */
9983    public static Mat estimateAffine2D(Mat from, Mat to) {
9984        return new Mat(estimateAffine2D_6(from.nativeObj, to.nativeObj));
9985    }
9986
9987
9988    //
9989    // C++:  Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params)
9990    //
9991
9992    public static Mat estimateAffine2D(Mat pts1, Mat pts2, Mat inliers, UsacParams params) {
9993        return new Mat(estimateAffine2D_7(pts1.nativeObj, pts2.nativeObj, inliers.nativeObj, params.nativeObj));
9994    }
9995
9996
9997    //
9998    // C++:  Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
9999    //
10000
10001    /**
10002     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10003     * two 2D point sets.
10004     *
10005     * @param from First input 2D point set.
10006     * @param to Second input 2D point set.
10007     * @param inliers Output vector indicating which points are inliers.
10008     * @param method Robust method used to compute transformation. The following methods are possible:
10009     * <ul>
10010     *   <li>
10011     *    REF: RANSAC - RANSAC-based robust method
10012     *   </li>
10013     *   <li>
10014     *    REF: LMEDS - Least-Median robust method
10015     * RANSAC is the default method.
10016     *   </li>
10017     * </ul>
10018     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10019     * a point as an inlier. Applies only to RANSAC.
10020     * @param maxIters The maximum number of robust method iterations.
10021     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
10022     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10023     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10024     * @param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
10025     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10026     *
10027     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10028     * empty matrix if transformation could not be estimated.
10029     *
10030     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10031     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10032     * estimation.
10033     *
10034     * The computed transformation is then refined further (using only inliers) with the
10035     * Levenberg-Marquardt method to reduce the re-projection error even more.
10036     *
10037     * Estimated transformation matrix is:
10038     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10039     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10040     * \end{bmatrix} \)
10041     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10042     * translations in \( x, y \) axes respectively.
10043     *
10044     * <b>Note:</b>
10045     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10046     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10047     * correctly only when there are more than 50% of inliers.
10048     *
10049     * SEE: estimateAffine2D, getAffineTransform
10050     */
10051    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters) {
10052        return new Mat(estimateAffinePartial2D_0(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence, refineIters));
10053    }
10054
10055    /**
10056     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10057     * two 2D point sets.
10058     *
10059     * @param from First input 2D point set.
10060     * @param to Second input 2D point set.
10061     * @param inliers Output vector indicating which points are inliers.
10062     * @param method Robust method used to compute transformation. The following methods are possible:
10063     * <ul>
10064     *   <li>
10065     *    REF: RANSAC - RANSAC-based robust method
10066     *   </li>
10067     *   <li>
10068     *    REF: LMEDS - Least-Median robust method
10069     * RANSAC is the default method.
10070     *   </li>
10071     * </ul>
10072     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10073     * a point as an inlier. Applies only to RANSAC.
10074     * @param maxIters The maximum number of robust method iterations.
10075     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
10076     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10077     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10078     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10079     *
10080     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10081     * empty matrix if transformation could not be estimated.
10082     *
10083     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10084     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10085     * estimation.
10086     *
10087     * The computed transformation is then refined further (using only inliers) with the
10088     * Levenberg-Marquardt method to reduce the re-projection error even more.
10089     *
10090     * Estimated transformation matrix is:
10091     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10092     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10093     * \end{bmatrix} \)
10094     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10095     * translations in \( x, y \) axes respectively.
10096     *
10097     * <b>Note:</b>
10098     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10099     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10100     * correctly only when there are more than 50% of inliers.
10101     *
10102     * SEE: estimateAffine2D, getAffineTransform
10103     */
10104    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence) {
10105        return new Mat(estimateAffinePartial2D_1(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence));
10106    }
10107
10108    /**
10109     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10110     * two 2D point sets.
10111     *
10112     * @param from First input 2D point set.
10113     * @param to Second input 2D point set.
10114     * @param inliers Output vector indicating which points are inliers.
10115     * @param method Robust method used to compute transformation. The following methods are possible:
10116     * <ul>
10117     *   <li>
10118     *    REF: RANSAC - RANSAC-based robust method
10119     *   </li>
10120     *   <li>
10121     *    REF: LMEDS - Least-Median robust method
10122     * RANSAC is the default method.
10123     *   </li>
10124     * </ul>
10125     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10126     * a point as an inlier. Applies only to RANSAC.
10127     * @param maxIters The maximum number of robust method iterations.
10128     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10129     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10130     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10131     *
10132     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10133     * empty matrix if transformation could not be estimated.
10134     *
10135     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10136     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10137     * estimation.
10138     *
10139     * The computed transformation is then refined further (using only inliers) with the
10140     * Levenberg-Marquardt method to reduce the re-projection error even more.
10141     *
10142     * Estimated transformation matrix is:
10143     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10144     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10145     * \end{bmatrix} \)
10146     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10147     * translations in \( x, y \) axes respectively.
10148     *
10149     * <b>Note:</b>
10150     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10151     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10152     * correctly only when there are more than 50% of inliers.
10153     *
10154     * SEE: estimateAffine2D, getAffineTransform
10155     */
10156    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters) {
10157        return new Mat(estimateAffinePartial2D_2(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters));
10158    }
10159
10160    /**
10161     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10162     * two 2D point sets.
10163     *
10164     * @param from First input 2D point set.
10165     * @param to Second input 2D point set.
10166     * @param inliers Output vector indicating which points are inliers.
10167     * @param method Robust method used to compute transformation. The following methods are possible:
10168     * <ul>
10169     *   <li>
10170     *    REF: RANSAC - RANSAC-based robust method
10171     *   </li>
10172     *   <li>
10173     *    REF: LMEDS - Least-Median robust method
10174     * RANSAC is the default method.
10175     *   </li>
10176     * </ul>
10177     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
10178     * a point as an inlier. Applies only to RANSAC.
10179     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10180     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10181     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10182     *
10183     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10184     * empty matrix if transformation could not be estimated.
10185     *
10186     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10187     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10188     * estimation.
10189     *
10190     * The computed transformation is then refined further (using only inliers) with the
10191     * Levenberg-Marquardt method to reduce the re-projection error even more.
10192     *
10193     * Estimated transformation matrix is:
10194     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10195     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10196     * \end{bmatrix} \)
10197     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10198     * translations in \( x, y \) axes respectively.
10199     *
10200     * <b>Note:</b>
10201     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10202     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10203     * correctly only when there are more than 50% of inliers.
10204     *
10205     * SEE: estimateAffine2D, getAffineTransform
10206     */
10207    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold) {
10208        return new Mat(estimateAffinePartial2D_3(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold));
10209    }
10210
10211    /**
10212     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10213     * two 2D point sets.
10214     *
10215     * @param from First input 2D point set.
10216     * @param to Second input 2D point set.
10217     * @param inliers Output vector indicating which points are inliers.
10218     * @param method Robust method used to compute transformation. The following methods are possible:
10219     * <ul>
10220     *   <li>
10221     *    REF: RANSAC - RANSAC-based robust method
10222     *   </li>
10223     *   <li>
10224     *    REF: LMEDS - Least-Median robust method
10225     * RANSAC is the default method.
10226     *   </li>
10227     * </ul>
10228     * a point as an inlier. Applies only to RANSAC.
10229     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10230     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10231     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10232     *
10233     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10234     * empty matrix if transformation could not be estimated.
10235     *
10236     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10237     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10238     * estimation.
10239     *
10240     * The computed transformation is then refined further (using only inliers) with the
10241     * Levenberg-Marquardt method to reduce the re-projection error even more.
10242     *
10243     * Estimated transformation matrix is:
10244     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10245     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10246     * \end{bmatrix} \)
10247     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10248     * translations in \( x, y \) axes respectively.
10249     *
10250     * <b>Note:</b>
10251     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10252     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10253     * correctly only when there are more than 50% of inliers.
10254     *
10255     * SEE: estimateAffine2D, getAffineTransform
10256     */
10257    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method) {
10258        return new Mat(estimateAffinePartial2D_4(from.nativeObj, to.nativeObj, inliers.nativeObj, method));
10259    }
10260
10261    /**
10262     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10263     * two 2D point sets.
10264     *
10265     * @param from First input 2D point set.
10266     * @param to Second input 2D point set.
10267     * @param inliers Output vector indicating which points are inliers.
10268     * <ul>
10269     *   <li>
10270     *    REF: RANSAC - RANSAC-based robust method
10271     *   </li>
10272     *   <li>
10273     *    REF: LMEDS - Least-Median robust method
10274     * RANSAC is the default method.
10275     *   </li>
10276     * </ul>
10277     * a point as an inlier. Applies only to RANSAC.
10278     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10279     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10280     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10281     *
10282     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10283     * empty matrix if transformation could not be estimated.
10284     *
10285     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10286     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10287     * estimation.
10288     *
10289     * The computed transformation is then refined further (using only inliers) with the
10290     * Levenberg-Marquardt method to reduce the re-projection error even more.
10291     *
10292     * Estimated transformation matrix is:
10293     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10294     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10295     * \end{bmatrix} \)
10296     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10297     * translations in \( x, y \) axes respectively.
10298     *
10299     * <b>Note:</b>
10300     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10301     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10302     * correctly only when there are more than 50% of inliers.
10303     *
10304     * SEE: estimateAffine2D, getAffineTransform
10305     */
10306    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers) {
10307        return new Mat(estimateAffinePartial2D_5(from.nativeObj, to.nativeObj, inliers.nativeObj));
10308    }
10309
10310    /**
10311     * Computes an optimal limited affine transformation with 4 degrees of freedom between
10312     * two 2D point sets.
10313     *
10314     * @param from First input 2D point set.
10315     * @param to Second input 2D point set.
10316     * <ul>
10317     *   <li>
10318     *    REF: RANSAC - RANSAC-based robust method
10319     *   </li>
10320     *   <li>
10321     *    REF: LMEDS - Least-Median robust method
10322     * RANSAC is the default method.
10323     *   </li>
10324     * </ul>
10325     * a point as an inlier. Applies only to RANSAC.
10326     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
10327     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
10328     * Passing 0 will disable refining, so the output matrix will be output of robust method.
10329     *
10330     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
10331     * empty matrix if transformation could not be estimated.
10332     *
10333     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
10334     * combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
10335     * estimation.
10336     *
10337     * The computed transformation is then refined further (using only inliers) with the
10338     * Levenberg-Marquardt method to reduce the re-projection error even more.
10339     *
10340     * Estimated transformation matrix is:
10341     * \( \begin{bmatrix} \cos(\theta) \cdot s &amp; -\sin(\theta) \cdot s &amp; t_x \\
10342     *                 \sin(\theta) \cdot s &amp; \cos(\theta) \cdot s &amp; t_y
10343     * \end{bmatrix} \)
10344     * Where \( \theta \) is the rotation angle, \( s \) the scaling factor and \( t_x, t_y \) are
10345     * translations in \( x, y \) axes respectively.
10346     *
10347     * <b>Note:</b>
10348     * The RANSAC method can handle practically any ratio of outliers but need a threshold to
10349     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
10350     * correctly only when there are more than 50% of inliers.
10351     *
10352     * SEE: estimateAffine2D, getAffineTransform
10353     */
10354    public static Mat estimateAffinePartial2D(Mat from, Mat to) {
10355        return new Mat(estimateAffinePartial2D_6(from.nativeObj, to.nativeObj));
10356    }
10357
10358
10359    //
10360    // C++:  int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
10361    //
10362
10363    /**
10364     * Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
10365     *
10366     * @param H The input homography matrix between two images.
10367     * @param K The input camera intrinsic matrix.
10368     * @param rotations Array of rotation matrices.
10369     * @param translations Array of translation matrices.
10370     * @param normals Array of plane normal matrices.
10371     *
10372     * This function extracts relative camera motion between two views of a planar object and returns up to
10373     * four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
10374     * the homography matrix H is described in detail in CITE: Malis2007.
10375     *
10376     * If the homography H, induced by the plane, gives the constraint
10377     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\) on the source image points
10378     * \(p_i\) and the destination image points \(p'_i\), then the tuple of rotations[k] and
10379     * translations[k] is a change of basis from the source camera's coordinate system to the destination
10380     * camera's coordinate system. However, by decomposing H, one can only get the translation normalized
10381     * by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
10382     *
10383     * If point correspondences are available, at least two solutions may further be invalidated, by
10384     * applying positive depth constraint, i.e. all points must be in front of the camera.
10385     * @return automatically generated
10386     */
10387    public static int decomposeHomographyMat(Mat H, Mat K, List<Mat> rotations, List<Mat> translations, List<Mat> normals) {
10388        Mat rotations_mat = new Mat();
10389        Mat translations_mat = new Mat();
10390        Mat normals_mat = new Mat();
10391        int retVal = decomposeHomographyMat_0(H.nativeObj, K.nativeObj, rotations_mat.nativeObj, translations_mat.nativeObj, normals_mat.nativeObj);
10392        Converters.Mat_to_vector_Mat(rotations_mat, rotations);
10393        rotations_mat.release();
10394        Converters.Mat_to_vector_Mat(translations_mat, translations);
10395        translations_mat.release();
10396        Converters.Mat_to_vector_Mat(normals_mat, normals);
10397        normals_mat.release();
10398        return retVal;
10399    }
10400
10401
10402    //
10403    // C++:  void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
10404    //
10405
10406    /**
10407     * Filters homography decompositions based on additional information.
10408     *
10409     * @param rotations Vector of rotation matrices.
10410     * @param normals Vector of plane normal matrices.
10411     * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
10412     * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
10413     * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
10414     * @param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function
10415     *
10416     * This function is intended to filter the output of the #decomposeHomographyMat based on additional
10417     * information as described in CITE: Malis2007 . The summary of the method: the #decomposeHomographyMat function
10418     * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
10419     * sets of points visible in the camera frame before and after the homography transformation is applied,
10420     * we can determine which are the true potential solutions and which are the opposites by verifying which
10421     * homographies are consistent with all visible reference points being in front of the camera. The inputs
10422     * are left unchanged; the filtered solution set is returned as indices into the existing one.
10423     */
10424    public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions, Mat pointsMask) {
10425        Mat rotations_mat = Converters.vector_Mat_to_Mat(rotations);
10426        Mat normals_mat = Converters.vector_Mat_to_Mat(normals);
10427        filterHomographyDecompByVisibleRefpoints_0(rotations_mat.nativeObj, normals_mat.nativeObj, beforePoints.nativeObj, afterPoints.nativeObj, possibleSolutions.nativeObj, pointsMask.nativeObj);
10428    }
10429
10430    /**
10431     * Filters homography decompositions based on additional information.
10432     *
10433     * @param rotations Vector of rotation matrices.
10434     * @param normals Vector of plane normal matrices.
10435     * @param beforePoints Vector of (rectified) visible reference points before the homography is applied
10436     * @param afterPoints Vector of (rectified) visible reference points after the homography is applied
10437     * @param possibleSolutions Vector of int indices representing the viable solution set after filtering
10438     *
10439     * This function is intended to filter the output of the #decomposeHomographyMat based on additional
10440     * information as described in CITE: Malis2007 . The summary of the method: the #decomposeHomographyMat function
10441     * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
10442     * sets of points visible in the camera frame before and after the homography transformation is applied,
10443     * we can determine which are the true potential solutions and which are the opposites by verifying which
10444     * homographies are consistent with all visible reference points being in front of the camera. The inputs
10445     * are left unchanged; the filtered solution set is returned as indices into the existing one.
10446     */
10447    public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions) {
10448        Mat rotations_mat = Converters.vector_Mat_to_Mat(rotations);
10449        Mat normals_mat = Converters.vector_Mat_to_Mat(normals);
10450        filterHomographyDecompByVisibleRefpoints_1(rotations_mat.nativeObj, normals_mat.nativeObj, beforePoints.nativeObj, afterPoints.nativeObj, possibleSolutions.nativeObj);
10451    }
10452
10453
10454    //
10455    // C++:  void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
10456    //
10457
10458    /**
10459     * Transforms an image to compensate for lens distortion.
10460     *
10461     * The function transforms an image to compensate radial and tangential lens distortion.
10462     *
10463     * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
10464     * (with bilinear interpolation). See the former function for details of the transformation being
10465     * performed.
10466     *
10467     * Those pixels in the destination image, for which there is no correspondent pixels in the source
10468     * image, are filled with zeros (black color).
10469     *
10470     * A particular subset of the source image that will be visible in the corrected image can be regulated
10471     * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
10472     * newCameraMatrix depending on your requirements.
10473     *
10474     * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
10475     * the resolution of images is different from the resolution used at the calibration stage, \(f_x,
10476     * f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
10477     * the same.
10478     *
10479     * @param src Input (distorted) image.
10480     * @param dst Output (corrected) image that has the same size and type as src .
10481     * @param cameraMatrix Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10482     * @param distCoeffs Input vector of distortion coefficients
10483     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10484     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10485     * @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
10486     * cameraMatrix but you may additionally scale and shift the result by using a different matrix.
10487     */
10488    public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix) {
10489        undistort_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, newCameraMatrix.nativeObj);
10490    }
10491
10492    /**
10493     * Transforms an image to compensate for lens distortion.
10494     *
10495     * The function transforms an image to compensate radial and tangential lens distortion.
10496     *
10497     * The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
10498     * (with bilinear interpolation). See the former function for details of the transformation being
10499     * performed.
10500     *
10501     * Those pixels in the destination image, for which there is no correspondent pixels in the source
10502     * image, are filled with zeros (black color).
10503     *
10504     * A particular subset of the source image that will be visible in the corrected image can be regulated
10505     * by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
10506     * newCameraMatrix depending on your requirements.
10507     *
10508     * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
10509     * the resolution of images is different from the resolution used at the calibration stage, \(f_x,
10510     * f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
10511     * the same.
10512     *
10513     * @param src Input (distorted) image.
10514     * @param dst Output (corrected) image that has the same size and type as src .
10515     * @param cameraMatrix Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10516     * @param distCoeffs Input vector of distortion coefficients
10517     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10518     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10519     * cameraMatrix but you may additionally scale and shift the result by using a different matrix.
10520     */
10521    public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) {
10522        undistort_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
10523    }
10524
10525
10526    //
10527    // C++:  void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
10528    //
10529
10530    /**
10531     * Computes the undistortion and rectification transformation map.
10532     *
10533     * The function computes the joint undistortion and rectification transformation and represents the
10534     * result in the form of maps for #remap. The undistorted image looks like original, as if it is
10535     * captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a
10536     * monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
10537     * #getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
10538     * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
10539     *
10540     * Also, this new camera is oriented differently in the coordinate space, according to R. That, for
10541     * example, helps to align two heads of a stereo camera so that the epipolar lines on both images
10542     * become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera).
10543     *
10544     * The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
10545     * is, for each pixel \((u, v)\) in the destination (corrected and rectified) image, the function
10546     * computes the corresponding coordinates in the source image (that is, in the original image from
10547     * camera). The following process is applied:
10548     * \(
10549     * \begin{array}{l}
10550     * x  \leftarrow (u - {c'}_x)/{f'}_x  \\
10551     * y  \leftarrow (v - {c'}_y)/{f'}_y  \\
10552     * {[X\,Y\,W]} ^T  \leftarrow R^{-1}*[x \, y \, 1]^T  \\
10553     * x'  \leftarrow X/W  \\
10554     * y'  \leftarrow Y/W  \\
10555     * r^2  \leftarrow x'^2 + y'^2 \\
10556     * x''  \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
10557     * + 2p_1 x' y' + p_2(r^2 + 2 x'^2)  + s_1 r^2 + s_2 r^4\\
10558     * y''  \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
10559     * + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
10560     * s\vecthree{x'''}{y'''}{1} =
10561     * \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}((\tau_x, \tau_y)}
10562     * {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
10563     * {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
10564     * map_x(u,v)  \leftarrow x''' f_x + c_x  \\
10565     * map_y(u,v)  \leftarrow y''' f_y + c_y
10566     * \end{array}
10567     * \)
10568     * where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10569     * are the distortion coefficients.
10570     *
10571     * In case of a stereo camera, this function is called twice: once for each camera head, after
10572     * #stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
10573     * was not calibrated, it is still possible to compute the rectification transformations directly from
10574     * the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
10575     * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
10576     * space. R can be computed from H as
10577     * \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
10578     * where cameraMatrix can be chosen arbitrarily.
10579     *
10580     * @param cameraMatrix Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10581     * @param distCoeffs Input vector of distortion coefficients
10582     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10583     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10584     * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
10585     * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
10586     * is assumed. In #initUndistortRectifyMap R assumed to be an identity matrix.
10587     * @param newCameraMatrix New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).
10588     * @param size Undistorted image size.
10589     * @param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
10590     * @param map1 The first output map.
10591     * @param map2 The second output map.
10592     */
10593    public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) {
10594        initUndistortRectifyMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
10595    }
10596
10597
10598    //
10599    // C++:  void cv::initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
10600    //
10601
10602    /**
10603     * Computes the projection and inverse-rectification transformation map. In essense, this is the inverse of
10604     * #initUndistortRectifyMap to accomodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
10605     *
10606     * The function computes the joint projection and inverse rectification transformation and represents the
10607     * result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
10608     * once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
10609     * is usually equal to cameraMatrix, or it can be computed by
10610     * #getOptimalNewCameraMatrix for a better control over scaling. In case of a projector-camera pair,
10611     * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
10612     *
10613     * The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
10614     * this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
10615     * allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
10616     *
10617     * The function builds the maps for the inverse mapping algorithm that is used by #remap. That
10618     * is, for each pixel \((u, v)\) in the destination (projected and inverse-rectified) image, the function
10619     * computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
10620     *
10621     * \(
10622     * \begin{array}{l}
10623     * \text{newCameraMatrix}\\
10624     * x  \leftarrow (u - {c'}_x)/{f'}_x  \\
10625     * y  \leftarrow (v - {c'}_y)/{f'}_y  \\
10626     *
10627     * \\\text{Undistortion}
10628     * \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
10629     * r^2  \leftarrow x^2 + y^2 \\
10630     * \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
10631     * x' \leftarrow \frac{x}{\theta} \\
10632     * y'  \leftarrow \frac{y}{\theta} \\
10633     *
10634     * \\\text{Rectification}\\
10635     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
10636     * x''  \leftarrow X/W  \\
10637     * y''  \leftarrow Y/W  \\
10638     *
10639     * \\\text{cameraMatrix}\\
10640     * map_x(u,v)  \leftarrow x'' f_x + c_x  \\
10641     * map_y(u,v)  \leftarrow y'' f_y + c_y
10642     * \end{array}
10643     * \)
10644     * where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10645     * are the distortion coefficients vector distCoeffs.
10646     *
10647     * In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
10648     * This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
10649     * is not calibrated, it is still possible to compute the rectification transformations directly from
10650     * the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
10651     * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
10652     * space. R can be computed from H as
10653     * \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
10654     * where cameraMatrix can be chosen arbitrarily.
10655     *
10656     * @param cameraMatrix Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10657     * @param distCoeffs Input vector of distortion coefficients
10658     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10659     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10660     * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
10661     * computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
10662     * is assumed.
10663     * @param newCameraMatrix New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).
10664     * @param size Distorted image size.
10665     * @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
10666     * @param map1 The first output map for #remap.
10667     * @param map2 The second output map for #remap.
10668     */
10669    public static void initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) {
10670        initInverseRectificationMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
10671    }
10672
10673
10674    //
10675    // C++:  Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
10676    //
10677
10678    /**
10679     * Returns the default new camera matrix.
10680     *
10681     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
10682     * centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
10683     *
10684     * In the latter case, the new camera matrix will be:
10685     *
10686     * \(\begin{bmatrix} f_x &amp;&amp; 0 &amp;&amp; ( \texttt{imgSize.width} -1)*0.5  \\ 0 &amp;&amp; f_y &amp;&amp; ( \texttt{imgSize.height} -1)*0.5  \\ 0 &amp;&amp; 0 &amp;&amp; 1 \end{bmatrix} ,\)
10687     *
10688     * where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
10689     *
10690     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
10691     * move the principal point. However, when you work with stereo, it is important to move the principal
10692     * points in both views to the same y-coordinate (which is required by most of stereo correspondence
10693     * algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
10694     * each view where the principal points are located at the center.
10695     *
10696     * @param cameraMatrix Input camera matrix.
10697     * @param imgsize Camera view image size in pixels.
10698     * @param centerPrincipalPoint Location of the principal point in the new camera matrix. The
10699     * parameter indicates whether this location should be at the image center or not.
10700     * @return automatically generated
10701     */
10702    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint) {
10703        return new Mat(getDefaultNewCameraMatrix_0(cameraMatrix.nativeObj, imgsize.width, imgsize.height, centerPrincipalPoint));
10704    }
10705
10706    /**
10707     * Returns the default new camera matrix.
10708     *
10709     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
10710     * centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
10711     *
10712     * In the latter case, the new camera matrix will be:
10713     *
10714     * \(\begin{bmatrix} f_x &amp;&amp; 0 &amp;&amp; ( \texttt{imgSize.width} -1)*0.5  \\ 0 &amp;&amp; f_y &amp;&amp; ( \texttt{imgSize.height} -1)*0.5  \\ 0 &amp;&amp; 0 &amp;&amp; 1 \end{bmatrix} ,\)
10715     *
10716     * where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
10717     *
10718     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
10719     * move the principal point. However, when you work with stereo, it is important to move the principal
10720     * points in both views to the same y-coordinate (which is required by most of stereo correspondence
10721     * algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
10722     * each view where the principal points are located at the center.
10723     *
10724     * @param cameraMatrix Input camera matrix.
10725     * @param imgsize Camera view image size in pixels.
10726     * parameter indicates whether this location should be at the image center or not.
10727     * @return automatically generated
10728     */
10729    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize) {
10730        return new Mat(getDefaultNewCameraMatrix_1(cameraMatrix.nativeObj, imgsize.width, imgsize.height));
10731    }
10732
10733    /**
10734     * Returns the default new camera matrix.
10735     *
10736     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
10737     * centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
10738     *
10739     * In the latter case, the new camera matrix will be:
10740     *
10741     * \(\begin{bmatrix} f_x &amp;&amp; 0 &amp;&amp; ( \texttt{imgSize.width} -1)*0.5  \\ 0 &amp;&amp; f_y &amp;&amp; ( \texttt{imgSize.height} -1)*0.5  \\ 0 &amp;&amp; 0 &amp;&amp; 1 \end{bmatrix} ,\)
10742     *
10743     * where \(f_x\) and \(f_y\) are \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
10744     *
10745     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
10746     * move the principal point. However, when you work with stereo, it is important to move the principal
10747     * points in both views to the same y-coordinate (which is required by most of stereo correspondence
10748     * algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for
10749     * each view where the principal points are located at the center.
10750     *
10751     * @param cameraMatrix Input camera matrix.
10752     * parameter indicates whether this location should be at the image center or not.
10753     * @return automatically generated
10754     */
10755    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix) {
10756        return new Mat(getDefaultNewCameraMatrix_2(cameraMatrix.nativeObj));
10757    }
10758
10759
10760    //
10761    // C++:  void cv::undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
10762    //
10763
10764    /**
10765     * Computes the ideal point coordinates from the observed point coordinates.
10766     *
10767     * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
10768     * sparse set of points instead of a raster image. Also the function performs a reverse transformation
10769     * to  #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
10770     * planar object, it does, up to a translation vector, if the proper R is specified.
10771     *
10772     * For each observed point coordinate \((u, v)\) the function computes:
10773     * \(
10774     * \begin{array}{l}
10775     * x^{"}  \leftarrow (u - c_x)/f_x  \\
10776     * y^{"}  \leftarrow (v - c_y)/f_y  \\
10777     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
10778     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
10779     * x  \leftarrow X/W  \\
10780     * y  \leftarrow Y/W  \\
10781     * \text{only performed if P is specified:} \\
10782     * u'  \leftarrow x {f'}_x + {c'}_x  \\
10783     * v'  \leftarrow y {f'}_y + {c'}_y
10784     * \end{array}
10785     * \)
10786     *
10787     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
10788     * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
10789     * coordinates do not depend on the camera matrix).
10790     *
10791     * The function can be used for both a stereo camera head or a monocular camera (when R is empty).
10792     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
10793     * vector&lt;Point2f&gt; ).
10794     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ) after undistortion and reverse perspective
10795     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
10796     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10797     * @param distCoeffs Input vector of distortion coefficients
10798     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10799     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10800     * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
10801     * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
10802     * @param P New camera matrix (3x3) or new projection matrix (3x4) \(\begin{bmatrix} {f'}_x &amp; 0 &amp; {c'}_x &amp; t_x \\ 0 &amp; {f'}_y &amp; {c'}_y &amp; t_y \\ 0 &amp; 0 &amp; 1 &amp; t_z \end{bmatrix}\). P1 or P2 computed by
10803     * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
10804     */
10805    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P) {
10806        Mat src_mat = src;
10807        Mat dst_mat = dst;
10808        undistortPoints_0(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj);
10809    }
10810
10811    /**
10812     * Computes the ideal point coordinates from the observed point coordinates.
10813     *
10814     * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
10815     * sparse set of points instead of a raster image. Also the function performs a reverse transformation
10816     * to  #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
10817     * planar object, it does, up to a translation vector, if the proper R is specified.
10818     *
10819     * For each observed point coordinate \((u, v)\) the function computes:
10820     * \(
10821     * \begin{array}{l}
10822     * x^{"}  \leftarrow (u - c_x)/f_x  \\
10823     * y^{"}  \leftarrow (v - c_y)/f_y  \\
10824     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
10825     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
10826     * x  \leftarrow X/W  \\
10827     * y  \leftarrow Y/W  \\
10828     * \text{only performed if P is specified:} \\
10829     * u'  \leftarrow x {f'}_x + {c'}_x  \\
10830     * v'  \leftarrow y {f'}_y + {c'}_y
10831     * \end{array}
10832     * \)
10833     *
10834     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
10835     * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
10836     * coordinates do not depend on the camera matrix).
10837     *
10838     * The function can be used for both a stereo camera head or a monocular camera (when R is empty).
10839     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
10840     * vector&lt;Point2f&gt; ).
10841     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ) after undistortion and reverse perspective
10842     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
10843     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10844     * @param distCoeffs Input vector of distortion coefficients
10845     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10846     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10847     * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
10848     * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
10849     * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
10850     */
10851    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R) {
10852        Mat src_mat = src;
10853        Mat dst_mat = dst;
10854        undistortPoints_1(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj);
10855    }
10856
10857    /**
10858     * Computes the ideal point coordinates from the observed point coordinates.
10859     *
10860     * The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
10861     * sparse set of points instead of a raster image. Also the function performs a reverse transformation
10862     * to  #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
10863     * planar object, it does, up to a translation vector, if the proper R is specified.
10864     *
10865     * For each observed point coordinate \((u, v)\) the function computes:
10866     * \(
10867     * \begin{array}{l}
10868     * x^{"}  \leftarrow (u - c_x)/f_x  \\
10869     * y^{"}  \leftarrow (v - c_y)/f_y  \\
10870     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
10871     * {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
10872     * x  \leftarrow X/W  \\
10873     * y  \leftarrow Y/W  \\
10874     * \text{only performed if P is specified:} \\
10875     * u'  \leftarrow x {f'}_x + {c'}_x  \\
10876     * v'  \leftarrow y {f'}_y + {c'}_y
10877     * \end{array}
10878     * \)
10879     *
10880     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
10881     * point coordinates out of the normalized distorted point coordinates ("normalized" means that the
10882     * coordinates do not depend on the camera matrix).
10883     *
10884     * The function can be used for both a stereo camera head or a monocular camera (when R is empty).
10885     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
10886     * vector&lt;Point2f&gt; ).
10887     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ) after undistortion and reverse perspective
10888     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
10889     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10890     * @param distCoeffs Input vector of distortion coefficients
10891     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
10892     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
10893     * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
10894     * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
10895     */
10896    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs) {
10897        Mat src_mat = src;
10898        Mat dst_mat = dst;
10899        undistortPoints_2(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
10900    }
10901
10902
10903    //
10904    // C++:  void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
10905    //
10906
10907    /**
10908     *
10909     *     <b>Note:</b> Default version of #undistortPoints does 5 iterations to compute undistorted points.
10910     * @param src automatically generated
10911     * @param dst automatically generated
10912     * @param cameraMatrix automatically generated
10913     * @param distCoeffs automatically generated
10914     * @param R automatically generated
10915     * @param P automatically generated
10916     * @param criteria automatically generated
10917     */
10918    public static void undistortPointsIter(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria) {
10919        undistortPointsIter_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
10920    }
10921
10922
10923    //
10924    // C++:  void cv::undistortImagePoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1 = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 0.01))
10925    //
10926
10927    /**
10928     * Compute undistorted image points position
10929     *
10930     * @param src Observed points position, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
10931     * CV_64FC2) (or vector&lt;Point2f&gt; ).
10932     * @param dst Output undistorted points position (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ).
10933     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10934     * @param distCoeffs Distortion coefficients
10935     * @param arg1 automatically generated
10936     */
10937    public static void undistortImagePoints(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1) {
10938        undistortImagePoints_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, arg1.type, arg1.maxCount, arg1.epsilon);
10939    }
10940
10941    /**
10942     * Compute undistorted image points position
10943     *
10944     * @param src Observed points position, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
10945     * CV_64FC2) (or vector&lt;Point2f&gt; ).
10946     * @param dst Output undistorted points position (1xN/Nx1 2-channel or vector&lt;Point2f&gt; ).
10947     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
10948     * @param distCoeffs Distortion coefficients
10949     */
10950    public static void undistortImagePoints(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) {
10951        undistortImagePoints_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
10952    }
10953
10954
10955    //
10956    // C++:  void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
10957    //
10958
10959    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha, Mat jacobian) {
10960        fisheye_projectPoints_0(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj, alpha, jacobian.nativeObj);
10961    }
10962
10963    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha) {
10964        fisheye_projectPoints_1(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj, alpha);
10965    }
10966
10967    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D) {
10968        fisheye_projectPoints_2(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj);
10969    }
10970
10971
10972    //
10973    // C++:  void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
10974    //
10975
10976    /**
10977     * Distorts 2D points using fisheye model.
10978     *
10979     *     @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is
10980     *     the number of points in the view.
10981     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
10982     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
10983     *     @param alpha The skew coefficient.
10984     *     @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
10985     *
10986     *     Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
10987     *     This means if you want to distort image points you have to multiply them with \(K^{-1}\).
10988     */
10989    public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D, double alpha) {
10990        fisheye_distortPoints_0(undistorted.nativeObj, distorted.nativeObj, K.nativeObj, D.nativeObj, alpha);
10991    }
10992
10993    /**
10994     * Distorts 2D points using fisheye model.
10995     *
10996     *     @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is
10997     *     the number of points in the view.
10998     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
10999     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11000     *     @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11001     *
11002     *     Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
11003     *     This means if you want to distort image points you have to multiply them with \(K^{-1}\).
11004     */
11005    public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D) {
11006        fisheye_distortPoints_1(undistorted.nativeObj, distorted.nativeObj, K.nativeObj, D.nativeObj);
11007    }
11008
11009
11010    //
11011    // C++:  void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat(), TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8))
11012    //
11013
11014    /**
11015     * Undistorts 2D points using fisheye model
11016     *
11017     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11018     *     number of points in the view.
11019     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11020     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11021     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11022     *     1-channel or 1x1 3-channel
11023     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11024     *     @param criteria Termination criteria
11025     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11026     */
11027    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R, Mat P, TermCriteria criteria) {
11028        fisheye_undistortPoints_0(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
11029    }
11030
11031    /**
11032     * Undistorts 2D points using fisheye model
11033     *
11034     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11035     *     number of points in the view.
11036     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11037     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11038     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11039     *     1-channel or 1x1 3-channel
11040     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11041     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11042     */
11043    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R, Mat P) {
11044        fisheye_undistortPoints_1(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj);
11045    }
11046
11047    /**
11048     * Undistorts 2D points using fisheye model
11049     *
11050     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11051     *     number of points in the view.
11052     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11053     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11054     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11055     *     1-channel or 1x1 3-channel
11056     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11057     */
11058    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R) {
11059        fisheye_undistortPoints_2(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj);
11060    }
11061
11062    /**
11063     * Undistorts 2D points using fisheye model
11064     *
11065     *     @param distorted Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt; ), where N is the
11066     *     number of points in the view.
11067     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11068     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11069     *     1-channel or 1x1 3-channel
11070     *     @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt; .
11071     */
11072    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D) {
11073        fisheye_undistortPoints_3(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj);
11074    }
11075
11076
11077    //
11078    // C++:  void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
11079    //
11080
11081    /**
11082     * Computes undistortion and rectification maps for image transform by #remap. If D is empty zero
11083     *     distortion is used, if R or P is empty identity matrixes are used.
11084     *
11085     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11086     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11087     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11088     *     1-channel or 1x1 3-channel
11089     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11090     *     @param size Undistorted image size.
11091     *     @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
11092     *     for details.
11093     *     @param map1 The first output map.
11094     *     @param map2 The second output map.
11095     */
11096    public static void fisheye_initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat map1, Mat map2) {
11097        fisheye_initUndistortRectifyMap_0(K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
11098    }
11099
11100
11101    //
11102    // C++:  void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
11103    //
11104
11105    /**
11106     * Transforms an image to compensate for fisheye lens distortion.
11107     *
11108     *     @param distorted image with fisheye lens distortion.
11109     *     @param undistorted Output image with compensated fisheye lens distortion.
11110     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11111     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11112     *     @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
11113     *     may additionally scale and shift the result by using a different matrix.
11114     *     @param new_size the new size
11115     *
11116     *     The function transforms an image to compensate radial and tangential lens distortion.
11117     *
11118     *     The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
11119     *     (with bilinear interpolation). See the former function for details of the transformation being
11120     *     performed.
11121     *
11122     *     See below the results of undistortImage.
11123     * <ul>
11124     *   <li>
11125     *           a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
11126     *             k_4, k_5, k_6) of distortion were optimized under calibration)
11127     *   <ul>
11128     *     <li>
11129     *            b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
11130     *             k_3, k_4) of fisheye distortion were optimized under calibration)
11131     *     </li>
11132     *     <li>
11133     *            c\) original image was captured with fisheye lens
11134     *     </li>
11135     *   </ul>
11136     *
11137     *     Pictures a) and b) almost the same. But if we consider points of image located far from the center
11138     *     of image, we can notice that on image a) these points are distorted.
11139     *   </li>
11140     * </ul>
11141     *
11142     *     ![image](pics/fisheye_undistorted.jpg)
11143     */
11144    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew, Size new_size) {
11145        fisheye_undistortImage_0(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, Knew.nativeObj, new_size.width, new_size.height);
11146    }
11147
11148    /**
11149     * Transforms an image to compensate for fisheye lens distortion.
11150     *
11151     *     @param distorted image with fisheye lens distortion.
11152     *     @param undistorted Output image with compensated fisheye lens distortion.
11153     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11154     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11155     *     @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
11156     *     may additionally scale and shift the result by using a different matrix.
11157     *
11158     *     The function transforms an image to compensate radial and tangential lens distortion.
11159     *
11160     *     The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
11161     *     (with bilinear interpolation). See the former function for details of the transformation being
11162     *     performed.
11163     *
11164     *     See below the results of undistortImage.
11165     * <ul>
11166     *   <li>
11167     *           a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
11168     *             k_4, k_5, k_6) of distortion were optimized under calibration)
11169     *   <ul>
11170     *     <li>
11171     *            b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
11172     *             k_3, k_4) of fisheye distortion were optimized under calibration)
11173     *     </li>
11174     *     <li>
11175     *            c\) original image was captured with fisheye lens
11176     *     </li>
11177     *   </ul>
11178     *
11179     *     Pictures a) and b) almost the same. But if we consider points of image located far from the center
11180     *     of image, we can notice that on image a) these points are distorted.
11181     *   </li>
11182     * </ul>
11183     *
11184     *     ![image](pics/fisheye_undistorted.jpg)
11185     */
11186    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew) {
11187        fisheye_undistortImage_1(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, Knew.nativeObj);
11188    }
11189
11190    /**
11191     * Transforms an image to compensate for fisheye lens distortion.
11192     *
11193     *     @param distorted image with fisheye lens distortion.
11194     *     @param undistorted Output image with compensated fisheye lens distortion.
11195     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11196     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11197     *     may additionally scale and shift the result by using a different matrix.
11198     *
11199     *     The function transforms an image to compensate radial and tangential lens distortion.
11200     *
11201     *     The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
11202     *     (with bilinear interpolation). See the former function for details of the transformation being
11203     *     performed.
11204     *
11205     *     See below the results of undistortImage.
11206     * <ul>
11207     *   <li>
11208     *           a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
11209     *             k_4, k_5, k_6) of distortion were optimized under calibration)
11210     *   <ul>
11211     *     <li>
11212     *            b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
11213     *             k_3, k_4) of fisheye distortion were optimized under calibration)
11214     *     </li>
11215     *     <li>
11216     *            c\) original image was captured with fisheye lens
11217     *     </li>
11218     *   </ul>
11219     *
11220     *     Pictures a) and b) almost the same. But if we consider points of image located far from the center
11221     *     of image, we can notice that on image a) these points are distorted.
11222     *   </li>
11223     * </ul>
11224     *
11225     *     ![image](pics/fisheye_undistorted.jpg)
11226     */
11227    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D) {
11228        fisheye_undistortImage_2(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj);
11229    }
11230
11231
11232    //
11233    // C++:  void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
11234    //
11235
11236    /**
11237     * Estimates new camera intrinsic matrix for undistortion or rectification.
11238     *
11239     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11240     *     @param image_size Size of the image
11241     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11242     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11243     *     1-channel or 1x1 3-channel
11244     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11245     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11246     *     length. Balance is in range of [0, 1].
11247     *     @param new_size the new size
11248     *     @param fov_scale Divisor for new focal length.
11249     */
11250    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size, double fov_scale) {
11251        fisheye_estimateNewCameraMatrixForUndistortRectify_0(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance, new_size.width, new_size.height, fov_scale);
11252    }
11253
11254    /**
11255     * Estimates new camera intrinsic matrix for undistortion or rectification.
11256     *
11257     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11258     *     @param image_size Size of the image
11259     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11260     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11261     *     1-channel or 1x1 3-channel
11262     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11263     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11264     *     length. Balance is in range of [0, 1].
11265     *     @param new_size the new size
11266     */
11267    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size) {
11268        fisheye_estimateNewCameraMatrixForUndistortRectify_1(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance, new_size.width, new_size.height);
11269    }
11270
11271    /**
11272     * Estimates new camera intrinsic matrix for undistortion or rectification.
11273     *
11274     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11275     *     @param image_size Size of the image
11276     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11277     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11278     *     1-channel or 1x1 3-channel
11279     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11280     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11281     *     length. Balance is in range of [0, 1].
11282     */
11283    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance) {
11284        fisheye_estimateNewCameraMatrixForUndistortRectify_2(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance);
11285    }
11286
11287    /**
11288     * Estimates new camera intrinsic matrix for undistortion or rectification.
11289     *
11290     *     @param K Camera intrinsic matrix \(cameramatrix{K}\).
11291     *     @param image_size Size of the image
11292     *     @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
11293     *     @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
11294     *     1-channel or 1x1 3-channel
11295     *     @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
11296     *     length. Balance is in range of [0, 1].
11297     */
11298    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P) {
11299        fisheye_estimateNewCameraMatrixForUndistortRectify_3(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj);
11300    }
11301
11302
11303    //
11304    // C++:  double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
11305    //
11306
11307    /**
11308     * Performs camera calibration
11309     *
11310     *     @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
11311     *     coordinate space.
11312     *     @param imagePoints vector of vectors of the projections of calibration pattern points.
11313     *     imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
11314     *     objectPoints[i].size() for each i.
11315     *     @param image_size Size of the image used only to initialize the camera intrinsic matrix.
11316     *     @param K Output 3x3 floating-point camera intrinsic matrix
11317     *     \(\cameramatrix{A}\) . If
11318     *     REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
11319     *     initialized before calling the function.
11320     *     @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
11321     *     @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
11322     *     That is, each k-th rotation vector together with the corresponding k-th translation vector (see
11323     *     the next output parameter description) brings the calibration pattern from the model coordinate
11324     *     space (in which object points are specified) to the world coordinate space, that is, a real
11325     *     position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
11326     *     @param tvecs Output vector of translation vectors estimated for each pattern view.
11327     *     @param flags Different flags that may be zero or a combination of the following values:
11328     * <ul>
11329     *   <li>
11330     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  cameraMatrix contains valid initial values of
11331     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11332     *     center ( imageSize is used), and focal distances are computed in a least-squares fashion.
11333     *   </li>
11334     *   <li>
11335     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsic will be recomputed after each iteration
11336     *     of intrinsic optimization.
11337     *   </li>
11338     *   <li>
11339     *         REF: fisheye::CALIB_CHECK_COND  The functions will check validity of condition number.
11340     *   </li>
11341     *   <li>
11342     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stay zero.
11343     *   </li>
11344     *   <li>
11345     *         REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
11346     *     are set to zeros and stay zero.
11347     *   </li>
11348     *   <li>
11349     *         REF: fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
11350     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11351     *   </li>
11352     *   <li>
11353     *         REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
11354     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11355     *   </li>
11356     * </ul>
11357     *     @param criteria Termination criteria for the iterative optimization algorithm.
11358     * @return automatically generated
11359     */
11360    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) {
11361        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11362        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
11363        Mat rvecs_mat = new Mat();
11364        Mat tvecs_mat = new Mat();
11365        double retVal = fisheye_calibrate_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
11366        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11367        rvecs_mat.release();
11368        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11369        tvecs_mat.release();
11370        return retVal;
11371    }
11372
11373    /**
11374     * Performs camera calibration
11375     *
11376     *     @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
11377     *     coordinate space.
11378     *     @param imagePoints vector of vectors of the projections of calibration pattern points.
11379     *     imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
11380     *     objectPoints[i].size() for each i.
11381     *     @param image_size Size of the image used only to initialize the camera intrinsic matrix.
11382     *     @param K Output 3x3 floating-point camera intrinsic matrix
11383     *     \(\cameramatrix{A}\) . If
11384     *     REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
11385     *     initialized before calling the function.
11386     *     @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
11387     *     @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
11388     *     That is, each k-th rotation vector together with the corresponding k-th translation vector (see
11389     *     the next output parameter description) brings the calibration pattern from the model coordinate
11390     *     space (in which object points are specified) to the world coordinate space, that is, a real
11391     *     position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
11392     *     @param tvecs Output vector of translation vectors estimated for each pattern view.
11393     *     @param flags Different flags that may be zero or a combination of the following values:
11394     * <ul>
11395     *   <li>
11396     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  cameraMatrix contains valid initial values of
11397     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11398     *     center ( imageSize is used), and focal distances are computed in a least-squares fashion.
11399     *   </li>
11400     *   <li>
11401     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsic will be recomputed after each iteration
11402     *     of intrinsic optimization.
11403     *   </li>
11404     *   <li>
11405     *         REF: fisheye::CALIB_CHECK_COND  The functions will check validity of condition number.
11406     *   </li>
11407     *   <li>
11408     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stay zero.
11409     *   </li>
11410     *   <li>
11411     *         REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
11412     *     are set to zeros and stay zero.
11413     *   </li>
11414     *   <li>
11415     *         REF: fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
11416     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11417     *   </li>
11418     *   <li>
11419     *         REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
11420     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11421     *   </li>
11422     * </ul>
11423     * @return automatically generated
11424     */
11425    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags) {
11426        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11427        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
11428        Mat rvecs_mat = new Mat();
11429        Mat tvecs_mat = new Mat();
11430        double retVal = fisheye_calibrate_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
11431        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11432        rvecs_mat.release();
11433        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11434        tvecs_mat.release();
11435        return retVal;
11436    }
11437
11438    /**
11439     * Performs camera calibration
11440     *
11441     *     @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
11442     *     coordinate space.
11443     *     @param imagePoints vector of vectors of the projections of calibration pattern points.
11444     *     imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
11445     *     objectPoints[i].size() for each i.
11446     *     @param image_size Size of the image used only to initialize the camera intrinsic matrix.
11447     *     @param K Output 3x3 floating-point camera intrinsic matrix
11448     *     \(\cameramatrix{A}\) . If
11449     *     REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
11450     *     initialized before calling the function.
11451     *     @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
11452     *     @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
11453     *     That is, each k-th rotation vector together with the corresponding k-th translation vector (see
11454     *     the next output parameter description) brings the calibration pattern from the model coordinate
11455     *     space (in which object points are specified) to the world coordinate space, that is, a real
11456     *     position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
11457     *     @param tvecs Output vector of translation vectors estimated for each pattern view.
11458     * <ul>
11459     *   <li>
11460     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  cameraMatrix contains valid initial values of
11461     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11462     *     center ( imageSize is used), and focal distances are computed in a least-squares fashion.
11463     *   </li>
11464     *   <li>
11465     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsic will be recomputed after each iteration
11466     *     of intrinsic optimization.
11467     *   </li>
11468     *   <li>
11469     *         REF: fisheye::CALIB_CHECK_COND  The functions will check validity of condition number.
11470     *   </li>
11471     *   <li>
11472     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stay zero.
11473     *   </li>
11474     *   <li>
11475     *         REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
11476     *     are set to zeros and stay zero.
11477     *   </li>
11478     *   <li>
11479     *         REF: fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
11480     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11481     *   </li>
11482     *   <li>
11483     *         REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
11484     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
11485     *   </li>
11486     * </ul>
11487     * @return automatically generated
11488     */
11489    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs) {
11490        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11491        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
11492        Mat rvecs_mat = new Mat();
11493        Mat tvecs_mat = new Mat();
11494        double retVal = fisheye_calibrate_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
11495        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11496        rvecs_mat.release();
11497        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11498        tvecs_mat.release();
11499        return retVal;
11500    }
11501
11502
11503    //
11504    // C++:  void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
11505    //
11506
11507    /**
11508     * Stereo rectification for fisheye camera model
11509     *
11510     *     @param K1 First camera intrinsic matrix.
11511     *     @param D1 First camera distortion parameters.
11512     *     @param K2 Second camera intrinsic matrix.
11513     *     @param D2 Second camera distortion parameters.
11514     *     @param imageSize Size of the image used for stereo calibration.
11515     *     @param R Rotation matrix between the coordinate systems of the first and the second
11516     *     cameras.
11517     *     @param tvec Translation vector between coordinate systems of the cameras.
11518     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
11519     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
11520     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
11521     *     camera.
11522     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
11523     *     camera.
11524     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).
11525     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
11526     *     the function makes the principal points of each camera have the same pixel coordinates in the
11527     *     rectified views. And if the flag is not set, the function may still shift the images in the
11528     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
11529     *     useful image area.
11530     *     @param newImageSize New image resolution after rectification. The same size should be passed to
11531     *     #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
11532     *     is passed (default), it is set to the original imageSize . Setting it to larger value can help you
11533     *     preserve details in the original image, especially when there is a big radial distortion.
11534     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11535     *     length. Balance is in range of [0, 1].
11536     *     @param fov_scale Divisor for new focal length.
11537     */
11538    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance, double fov_scale) {
11539        fisheye_stereoRectify_0(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height, balance, fov_scale);
11540    }
11541
11542    /**
11543     * Stereo rectification for fisheye camera model
11544     *
11545     *     @param K1 First camera intrinsic matrix.
11546     *     @param D1 First camera distortion parameters.
11547     *     @param K2 Second camera intrinsic matrix.
11548     *     @param D2 Second camera distortion parameters.
11549     *     @param imageSize Size of the image used for stereo calibration.
11550     *     @param R Rotation matrix between the coordinate systems of the first and the second
11551     *     cameras.
11552     *     @param tvec Translation vector between coordinate systems of the cameras.
11553     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
11554     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
11555     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
11556     *     camera.
11557     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
11558     *     camera.
11559     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).
11560     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
11561     *     the function makes the principal points of each camera have the same pixel coordinates in the
11562     *     rectified views. And if the flag is not set, the function may still shift the images in the
11563     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
11564     *     useful image area.
11565     *     @param newImageSize New image resolution after rectification. The same size should be passed to
11566     *     #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
11567     *     is passed (default), it is set to the original imageSize . Setting it to larger value can help you
11568     *     preserve details in the original image, especially when there is a big radial distortion.
11569     *     @param balance Sets the new focal length in range between the min focal length and the max focal
11570     *     length. Balance is in range of [0, 1].
11571     */
11572    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance) {
11573        fisheye_stereoRectify_1(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height, balance);
11574    }
11575
11576    /**
11577     * Stereo rectification for fisheye camera model
11578     *
11579     *     @param K1 First camera intrinsic matrix.
11580     *     @param D1 First camera distortion parameters.
11581     *     @param K2 Second camera intrinsic matrix.
11582     *     @param D2 Second camera distortion parameters.
11583     *     @param imageSize Size of the image used for stereo calibration.
11584     *     @param R Rotation matrix between the coordinate systems of the first and the second
11585     *     cameras.
11586     *     @param tvec Translation vector between coordinate systems of the cameras.
11587     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
11588     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
11589     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
11590     *     camera.
11591     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
11592     *     camera.
11593     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).
11594     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
11595     *     the function makes the principal points of each camera have the same pixel coordinates in the
11596     *     rectified views. And if the flag is not set, the function may still shift the images in the
11597     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
11598     *     useful image area.
11599     *     @param newImageSize New image resolution after rectification. The same size should be passed to
11600     *     #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
11601     *     is passed (default), it is set to the original imageSize . Setting it to larger value can help you
11602     *     preserve details in the original image, especially when there is a big radial distortion.
11603     *     length. Balance is in range of [0, 1].
11604     */
11605    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize) {
11606        fisheye_stereoRectify_2(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height);
11607    }
11608
11609    /**
11610     * Stereo rectification for fisheye camera model
11611     *
11612     *     @param K1 First camera intrinsic matrix.
11613     *     @param D1 First camera distortion parameters.
11614     *     @param K2 Second camera intrinsic matrix.
11615     *     @param D2 Second camera distortion parameters.
11616     *     @param imageSize Size of the image used for stereo calibration.
11617     *     @param R Rotation matrix between the coordinate systems of the first and the second
11618     *     cameras.
11619     *     @param tvec Translation vector between coordinate systems of the cameras.
11620     *     @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
11621     *     @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
11622     *     @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
11623     *     camera.
11624     *     @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
11625     *     camera.
11626     *     @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see #reprojectImageTo3D ).
11627     *     @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
11628     *     the function makes the principal points of each camera have the same pixel coordinates in the
11629     *     rectified views. And if the flag is not set, the function may still shift the images in the
11630     *     horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
11631     *     useful image area.
11632     *     #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
11633     *     is passed (default), it is set to the original imageSize . Setting it to larger value can help you
11634     *     preserve details in the original image, especially when there is a big radial distortion.
11635     *     length. Balance is in range of [0, 1].
11636     */
11637    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags) {
11638        fisheye_stereoRectify_3(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags);
11639    }
11640
11641
11642    //
11643    // C++:  double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, vector_Mat& rvecs, vector_Mat& tvecs, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
11644    //
11645
11646    /**
11647     * Performs stereo calibration
11648     *
11649     *     @param objectPoints Vector of vectors of the calibration pattern points.
11650     *     @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
11651     *     observed by the first camera.
11652     *     @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
11653     *     observed by the second camera.
11654     *     @param K1 Input/output first camera intrinsic matrix:
11655     *     \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
11656     *     any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
11657     *     some or all of the matrix components must be initialized.
11658     *     @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.
11659     *     @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
11660     *     @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
11661     *     similar to D1 .
11662     *     @param imageSize Size of the image used only to initialize camera intrinsic matrix.
11663     *     @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
11664     *     @param T Output translation vector between the coordinate systems of the cameras.
11665     *     @param rvecs Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
11666     *     coordinate system of the first camera of the stereo pair (e.g. std::vector&lt;cv::Mat&gt;). More in detail, each
11667     *     i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
11668     *     description) brings the calibration pattern from the object coordinate space (in which object points are
11669     *     specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
11670     *     the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
11671     *     to camera coordinate space of the first camera of the stereo pair.
11672     *     @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
11673     *     of previous output parameter ( rvecs ).
11674     *     @param flags Different flags that may be zero or a combination of the following values:
11675     * <ul>
11676     *   <li>
11677     *         REF: fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2? and D1, D2? so that only R, T matrices
11678     *     are estimated.
11679     *   </li>
11680     *   <li>
11681     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  K1, K2 contains valid initial values of
11682     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11683     *     center (imageSize is used), and focal distances are computed in a least-squares fashion.
11684     *   </li>
11685     *   <li>
11686     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsic will be recomputed after each iteration
11687     *     of intrinsic optimization.
11688     *   </li>
11689     *   <li>
11690     *         REF: fisheye::CALIB_CHECK_COND  The functions will check validity of condition number.
11691     *   </li>
11692     *   <li>
11693     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stay zero.
11694     *   </li>
11695     *   <li>
11696     *        REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay
11697     *     zero.
11698     *   </li>
11699     * </ul>
11700     *     @param criteria Termination criteria for the iterative optimization algorithm.
11701     * @return automatically generated
11702     */
11703    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) {
11704        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11705        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11706        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11707        Mat rvecs_mat = new Mat();
11708        Mat tvecs_mat = new Mat();
11709        double retVal = fisheye_stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
11710        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11711        rvecs_mat.release();
11712        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11713        tvecs_mat.release();
11714        return retVal;
11715    }
11716
11717    /**
11718     * Performs stereo calibration
11719     *
11720     *     @param objectPoints Vector of vectors of the calibration pattern points.
11721     *     @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
11722     *     observed by the first camera.
11723     *     @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
11724     *     observed by the second camera.
11725     *     @param K1 Input/output first camera intrinsic matrix:
11726     *     \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
11727     *     any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
11728     *     some or all of the matrix components must be initialized.
11729     *     @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.
11730     *     @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
11731     *     @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
11732     *     similar to D1 .
11733     *     @param imageSize Size of the image used only to initialize camera intrinsic matrix.
11734     *     @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
11735     *     @param T Output translation vector between the coordinate systems of the cameras.
11736     *     @param rvecs Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
11737     *     coordinate system of the first camera of the stereo pair (e.g. std::vector&lt;cv::Mat&gt;). More in detail, each
11738     *     i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
11739     *     description) brings the calibration pattern from the object coordinate space (in which object points are
11740     *     specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
11741     *     the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
11742     *     to camera coordinate space of the first camera of the stereo pair.
11743     *     @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
11744     *     of previous output parameter ( rvecs ).
11745     *     @param flags Different flags that may be zero or a combination of the following values:
11746     * <ul>
11747     *   <li>
11748     *         REF: fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2? and D1, D2? so that only R, T matrices
11749     *     are estimated.
11750     *   </li>
11751     *   <li>
11752     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  K1, K2 contains valid initial values of
11753     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11754     *     center (imageSize is used), and focal distances are computed in a least-squares fashion.
11755     *   </li>
11756     *   <li>
11757     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsic will be recomputed after each iteration
11758     *     of intrinsic optimization.
11759     *   </li>
11760     *   <li>
11761     *         REF: fisheye::CALIB_CHECK_COND  The functions will check validity of condition number.
11762     *   </li>
11763     *   <li>
11764     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stay zero.
11765     *   </li>
11766     *   <li>
11767     *        REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay
11768     *     zero.
11769     *   </li>
11770     * </ul>
11771     * @return automatically generated
11772     */
11773    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, List<Mat> rvecs, List<Mat> tvecs, int flags) {
11774        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11775        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11776        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11777        Mat rvecs_mat = new Mat();
11778        Mat tvecs_mat = new Mat();
11779        double retVal = fisheye_stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
11780        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11781        rvecs_mat.release();
11782        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11783        tvecs_mat.release();
11784        return retVal;
11785    }
11786
11787    /**
11788     * Performs stereo calibration
11789     *
11790     *     @param objectPoints Vector of vectors of the calibration pattern points.
11791     *     @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
11792     *     observed by the first camera.
11793     *     @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
11794     *     observed by the second camera.
11795     *     @param K1 Input/output first camera intrinsic matrix:
11796     *     \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If
11797     *     any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified,
11798     *     some or all of the matrix components must be initialized.
11799     *     @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements.
11800     *     @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
11801     *     @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
11802     *     similar to D1 .
11803     *     @param imageSize Size of the image used only to initialize camera intrinsic matrix.
11804     *     @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
11805     *     @param T Output translation vector between the coordinate systems of the cameras.
11806     *     @param rvecs Output vector of rotation vectors ( REF: Rodrigues ) estimated for each pattern view in the
11807     *     coordinate system of the first camera of the stereo pair (e.g. std::vector&lt;cv::Mat&gt;). More in detail, each
11808     *     i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
11809     *     description) brings the calibration pattern from the object coordinate space (in which object points are
11810     *     specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
11811     *     the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
11812     *     to camera coordinate space of the first camera of the stereo pair.
11813     *     @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
11814     *     of previous output parameter ( rvecs ).
11815     * <ul>
11816     *   <li>
11817     *         REF: fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2? and D1, D2? so that only R, T matrices
11818     *     are estimated.
11819     *   </li>
11820     *   <li>
11821     *         REF: fisheye::CALIB_USE_INTRINSIC_GUESS  K1, K2 contains valid initial values of
11822     *     fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
11823     *     center (imageSize is used), and focal distances are computed in a least-squares fashion.
11824     *   </li>
11825     *   <li>
11826     *         REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsic will be recomputed after each iteration
11827     *     of intrinsic optimization.
11828     *   </li>
11829     *   <li>
11830     *         REF: fisheye::CALIB_CHECK_COND  The functions will check validity of condition number.
11831     *   </li>
11832     *   <li>
11833     *         REF: fisheye::CALIB_FIX_SKEW  Skew coefficient (alpha) is set to zero and stay zero.
11834     *   </li>
11835     *   <li>
11836     *        REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay
11837     *     zero.
11838     *   </li>
11839     * </ul>
11840     * @return automatically generated
11841     */
11842    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, List<Mat> rvecs, List<Mat> tvecs) {
11843        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11844        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11845        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11846        Mat rvecs_mat = new Mat();
11847        Mat tvecs_mat = new Mat();
11848        double retVal = fisheye_stereoCalibrate_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
11849        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
11850        rvecs_mat.release();
11851        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
11852        tvecs_mat.release();
11853        return retVal;
11854    }
11855
11856
11857    //
11858    // C++:  double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
11859    //
11860
11861    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags, TermCriteria criteria) {
11862        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11863        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11864        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11865        return fisheye_stereoCalibrate_3(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
11866    }
11867
11868    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags) {
11869        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11870        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11871        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11872        return fisheye_stereoCalibrate_4(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags);
11873    }
11874
11875    public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T) {
11876        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11877        Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11878        Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11879        return fisheye_stereoCalibrate_5(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj);
11880    }
11881
11882
11883
11884
11885    // C++:  void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
11886    private static native void Rodrigues_0(long src_nativeObj, long dst_nativeObj, long jacobian_nativeObj);
11887    private static native void Rodrigues_1(long src_nativeObj, long dst_nativeObj);
11888
11889    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
11890    private static native long findHomography_0(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj, int maxIters, double confidence);
11891    private static native long findHomography_1(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj, int maxIters);
11892    private static native long findHomography_2(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj);
11893    private static native long findHomography_3(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold);
11894    private static native long findHomography_4(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method);
11895    private static native long findHomography_5(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj);
11896
11897    // C++:  Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, Mat& mask, UsacParams params)
11898    private static native long findHomography_6(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, long mask_nativeObj, long params_nativeObj);
11899
11900    // C++:  Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
11901    private static native double[] RQDecomp3x3_0(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj, long Qz_nativeObj);
11902    private static native double[] RQDecomp3x3_1(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj);
11903    private static native double[] RQDecomp3x3_2(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj);
11904    private static native double[] RQDecomp3x3_3(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj);
11905
11906    // C++:  void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
11907    private static native void decomposeProjectionMatrix_0(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj, long eulerAngles_nativeObj);
11908    private static native void decomposeProjectionMatrix_1(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj);
11909    private static native void decomposeProjectionMatrix_2(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj);
11910    private static native void decomposeProjectionMatrix_3(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj);
11911    private static native void decomposeProjectionMatrix_4(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj);
11912
11913    // C++:  void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
11914    private static native void matMulDeriv_0(long A_nativeObj, long B_nativeObj, long dABdA_nativeObj, long dABdB_nativeObj);
11915
11916    // C++:  void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
11917    private static native void composeRT_0(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj, long dt3dt2_nativeObj);
11918    private static native void composeRT_1(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj);
11919    private static native void composeRT_2(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj);
11920    private static native void composeRT_3(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj);
11921    private static native void composeRT_4(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj);
11922    private static native void composeRT_5(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj);
11923    private static native void composeRT_6(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj);
11924    private static native void composeRT_7(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj);
11925    private static native void composeRT_8(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj);
11926
11927    // C++:  void cv::projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
11928    private static native void projectPoints_0(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj, double aspectRatio);
11929    private static native void projectPoints_1(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj);
11930    private static native void projectPoints_2(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj);
11931
11932    // C++:  bool cv::solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
11933    private static native boolean solvePnP_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int flags);
11934    private static native boolean solvePnP_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess);
11935    private static native boolean solvePnP_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
11936
11937    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
11938    private static native boolean solvePnPRansac_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, long inliers_nativeObj, int flags);
11939    private static native boolean solvePnPRansac_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, long inliers_nativeObj);
11940    private static native boolean solvePnPRansac_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence);
11941    private static native boolean solvePnPRansac_3(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError);
11942    private static native boolean solvePnPRansac_4(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount);
11943    private static native boolean solvePnPRansac_5(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess);
11944    private static native boolean solvePnPRansac_6(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
11945
11946    // C++:  bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
11947    private static native boolean solvePnPRansac_7(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long inliers_nativeObj, long params_nativeObj);
11948    private static native boolean solvePnPRansac_8(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long inliers_nativeObj);
11949
11950    // C++:  int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags)
11951    private static native int solveP3P_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
11952
11953    // C++:  void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON))
11954    private static native void solvePnPRefineLM_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
11955    private static native void solvePnPRefineLM_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
11956
11957    // C++:  void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
11958    private static native void solvePnPRefineVVS_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, double VVSlambda);
11959    private static native void solvePnPRefineVVS_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
11960    private static native void solvePnPRefineVVS_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj);
11961
11962    // C++:  int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
11963    private static native int solvePnPGeneric_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj, long tvec_nativeObj, long reprojectionError_nativeObj);
11964    private static native int solvePnPGeneric_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj, long tvec_nativeObj);
11965    private static native int solvePnPGeneric_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj);
11966    private static native int solvePnPGeneric_3(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags);
11967    private static native int solvePnPGeneric_4(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess);
11968    private static native int solvePnPGeneric_5(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);
11969
11970    // C++:  Mat cv::initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.0)
11971    private static native long initCameraMatrix2D_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, double aspectRatio);
11972    private static native long initCameraMatrix2D_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height);
11973
11974    // C++:  bool cv::findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
11975    private static native boolean findChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, int flags);
11976    private static native boolean findChessboardCorners_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj);
11977
11978    // C++:  bool cv::checkChessboard(Mat img, Size size)
11979    private static native boolean checkChessboard_0(long img_nativeObj, double size_width, double size_height);
11980
11981    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
11982    private static native boolean findChessboardCornersSBWithMeta_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, int flags, long meta_nativeObj);
11983
11984    // C++:  bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
11985    private static native boolean findChessboardCornersSB_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, int flags);
11986    private static native boolean findChessboardCornersSB_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj);
11987
11988    // C++:  Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
11989    private static native double[] estimateChessboardSharpness_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance, boolean vertical, long sharpness_nativeObj);
11990    private static native double[] estimateChessboardSharpness_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance, boolean vertical);
11991    private static native double[] estimateChessboardSharpness_2(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance);
11992    private static native double[] estimateChessboardSharpness_3(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj);
11993
11994    // C++:  bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
11995    private static native boolean find4QuadCornerSubpix_0(long img_nativeObj, long corners_nativeObj, double region_size_width, double region_size_height);
11996
11997    // C++:  void cv::drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound)
11998    private static native void drawChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, boolean patternWasFound);
11999
12000    // C++:  void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
12001    private static native void drawFrameAxes_0(long image_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, float length, int thickness);
12002    private static native void drawFrameAxes_1(long image_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, float length);
12003
12004    // C++:  bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = SimpleBlobDetector::create())
12005    private static native boolean findCirclesGrid_0(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj, int flags);
12006    private static native boolean findCirclesGrid_2(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj);
12007
12008    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
12009    private static native double calibrateCameraExtended_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12010    private static native double calibrateCameraExtended_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj, int flags);
12011    private static native double calibrateCameraExtended_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj);
12012
12013    // C++:  double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
12014    private static native double calibrateCamera_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12015    private static native double calibrateCamera_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
12016    private static native double calibrateCamera_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);
12017
12018    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
12019    private static native double calibrateCameraROExtended_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12020    private static native double calibrateCameraROExtended_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj, int flags);
12021    private static native double calibrateCameraROExtended_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj);
12022
12023    // C++:  double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
12024    private static native double calibrateCameraRO_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12025    private static native double calibrateCameraRO_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, int flags);
12026    private static native double calibrateCameraRO_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj);
12027
12028    // C++:  void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
12029    private static native void calibrationMatrixValues_0(long cameraMatrix_nativeObj, double imageSize_width, double imageSize_height, double apertureWidth, double apertureHeight, double[] fovx_out, double[] fovy_out, double[] focalLength_out, double[] principalPoint_out, double[] aspectRatio_out);
12030
12031    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, vector_Mat& rvecs, vector_Mat& tvecs, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
12032    private static native double stereoCalibrateExtended_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12033    private static native double stereoCalibrateExtended_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long perViewErrors_nativeObj, int flags);
12034    private static native double stereoCalibrateExtended_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long perViewErrors_nativeObj);
12035
12036    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
12037    private static native double stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12038    private static native double stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int flags);
12039    private static native double stereoCalibrate_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj);
12040
12041    // C++:  double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
12042    private static native double stereoCalibrate_3(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12043    private static native double stereoCalibrate_4(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj, int flags);
12044    private static native double stereoCalibrate_5(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj);
12045
12046    // C++:  void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0)
12047    private static native void stereoRectify_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out, double[] validPixROI2_out);
12048    private static native void stereoRectify_1(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out);
12049    private static native void stereoRectify_2(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height);
12050    private static native void stereoRectify_3(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha);
12051    private static native void stereoRectify_4(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags);
12052    private static native void stereoRectify_5(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj);
12053
12054    // C++:  bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5)
12055    private static native boolean stereoRectifyUncalibrated_0(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj, double threshold);
12056    private static native boolean stereoRectifyUncalibrated_1(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj);
12057
12058    // C++:  float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags)
12059    private static native float rectify3Collinear_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long cameraMatrix3_nativeObj, long distCoeffs3_nativeObj, long imgpt1_mat_nativeObj, long imgpt3_mat_nativeObj, double imageSize_width, double imageSize_height, long R12_nativeObj, long T12_nativeObj, long R13_nativeObj, long T13_nativeObj, long R1_nativeObj, long R2_nativeObj, long R3_nativeObj, long P1_nativeObj, long P2_nativeObj, long P3_nativeObj, long Q_nativeObj, double alpha, double newImgSize_width, double newImgSize_height, double[] roi1_out, double[] roi2_out, int flags);
12060
12061    // C++:  Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false)
12062    private static native long getOptimalNewCameraMatrix_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out, boolean centerPrincipalPoint);
12063    private static native long getOptimalNewCameraMatrix_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out);
12064    private static native long getOptimalNewCameraMatrix_2(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height);
12065    private static native long getOptimalNewCameraMatrix_3(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha);
12066
12067    // C++:  void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
12068    private static native void calibrateHandEye_0(long R_gripper2base_mat_nativeObj, long t_gripper2base_mat_nativeObj, long R_target2cam_mat_nativeObj, long t_target2cam_mat_nativeObj, long R_cam2gripper_nativeObj, long t_cam2gripper_nativeObj, int method);
12069    private static native void calibrateHandEye_1(long R_gripper2base_mat_nativeObj, long t_gripper2base_mat_nativeObj, long R_target2cam_mat_nativeObj, long t_target2cam_mat_nativeObj, long R_cam2gripper_nativeObj, long t_cam2gripper_nativeObj);
12070
12071    // C++:  void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
12072    private static native void calibrateRobotWorldHandEye_0(long R_world2cam_mat_nativeObj, long t_world2cam_mat_nativeObj, long R_base2gripper_mat_nativeObj, long t_base2gripper_mat_nativeObj, long R_base2world_nativeObj, long t_base2world_nativeObj, long R_gripper2cam_nativeObj, long t_gripper2cam_nativeObj, int method);
12073    private static native void calibrateRobotWorldHandEye_1(long R_world2cam_mat_nativeObj, long t_world2cam_mat_nativeObj, long R_base2gripper_mat_nativeObj, long t_base2gripper_mat_nativeObj, long R_base2world_nativeObj, long t_base2world_nativeObj, long R_gripper2cam_nativeObj, long t_gripper2cam_nativeObj);
12074
12075    // C++:  void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
12076    private static native void convertPointsToHomogeneous_0(long src_nativeObj, long dst_nativeObj);
12077
12078    // C++:  void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
12079    private static native void convertPointsFromHomogeneous_0(long src_nativeObj, long dst_nativeObj);
12080
12081    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
12082    private static native long findFundamentalMat_0(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, int maxIters, long mask_nativeObj);
12083    private static native long findFundamentalMat_1(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, int maxIters);
12084
12085    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
12086    private static native long findFundamentalMat_2(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, long mask_nativeObj);
12087    private static native long findFundamentalMat_3(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence);
12088    private static native long findFundamentalMat_4(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold);
12089    private static native long findFundamentalMat_5(long points1_mat_nativeObj, long points2_mat_nativeObj, int method);
12090    private static native long findFundamentalMat_6(long points1_mat_nativeObj, long points2_mat_nativeObj);
12091
12092    // C++:  Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, Mat& mask, UsacParams params)
12093    private static native long findFundamentalMat_7(long points1_mat_nativeObj, long points2_mat_nativeObj, long mask_nativeObj, long params_nativeObj);
12094
12095    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
12096    private static native long findEssentialMat_0(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold, int maxIters, long mask_nativeObj);
12097    private static native long findEssentialMat_1(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold, int maxIters);
12098    private static native long findEssentialMat_2(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold);
12099    private static native long findEssentialMat_3(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob);
12100    private static native long findEssentialMat_4(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method);
12101    private static native long findEssentialMat_5(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj);
12102
12103    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
12104    private static native long findEssentialMat_6(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold, int maxIters, long mask_nativeObj);
12105    private static native long findEssentialMat_7(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold, int maxIters);
12106    private static native long findEssentialMat_8(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold);
12107    private static native long findEssentialMat_9(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob);
12108    private static native long findEssentialMat_10(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method);
12109    private static native long findEssentialMat_11(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y);
12110    private static native long findEssentialMat_12(long points1_nativeObj, long points2_nativeObj, double focal);
12111    private static native long findEssentialMat_13(long points1_nativeObj, long points2_nativeObj);
12112
12113    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
12114    private static native long findEssentialMat_14(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob, double threshold, long mask_nativeObj);
12115    private static native long findEssentialMat_15(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob, double threshold);
12116    private static native long findEssentialMat_16(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob);
12117    private static native long findEssentialMat_17(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method);
12118    private static native long findEssentialMat_18(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj);
12119
12120    // C++:  Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params)
12121    private static native long findEssentialMat_19(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long cameraMatrix2_nativeObj, long dist_coeff1_nativeObj, long dist_coeff2_nativeObj, long mask_nativeObj, long params_nativeObj);
12122
12123    // C++:  void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t)
12124    private static native void decomposeEssentialMat_0(long E_nativeObj, long R1_nativeObj, long R2_nativeObj, long t_nativeObj);
12125
12126    // C++:  int cv::recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat& E, Mat& R, Mat& t, int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat())
12127    private static native int recoverPose_0(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method, double prob, double threshold, long mask_nativeObj);
12128    private static native int recoverPose_1(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method, double prob, double threshold);
12129    private static native int recoverPose_2(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method, double prob);
12130    private static native int recoverPose_3(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method);
12131    private static native int recoverPose_4(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj);
12132
12133    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
12134    private static native int recoverPose_5(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, long mask_nativeObj);
12135    private static native int recoverPose_6(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj);
12136
12137    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
12138    private static native int recoverPose_7(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal, double pp_x, double pp_y, long mask_nativeObj);
12139    private static native int recoverPose_8(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal, double pp_x, double pp_y);
12140    private static native int recoverPose_9(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal);
12141    private static native int recoverPose_10(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj);
12142
12143    // C++:  int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
12144    private static native int recoverPose_11(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh, long mask_nativeObj, long triangulatedPoints_nativeObj);
12145    private static native int recoverPose_12(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh, long mask_nativeObj);
12146    private static native int recoverPose_13(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh);
12147
12148    // C++:  void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
12149    private static native void computeCorrespondEpilines_0(long points_nativeObj, int whichImage, long F_nativeObj, long lines_nativeObj);
12150
12151    // C++:  void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
12152    private static native void triangulatePoints_0(long projMatr1_nativeObj, long projMatr2_nativeObj, long projPoints1_nativeObj, long projPoints2_nativeObj, long points4D_nativeObj);
12153
12154    // C++:  void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
12155    private static native void correctMatches_0(long F_nativeObj, long points1_nativeObj, long points2_nativeObj, long newPoints1_nativeObj, long newPoints2_nativeObj);
12156
12157    // C++:  void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
12158    private static native void filterSpeckles_0(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff, long buf_nativeObj);
12159    private static native void filterSpeckles_1(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff);
12160
12161    // C++:  Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
12162    private static native double[] getValidDisparityROI_0(int roi1_x, int roi1_y, int roi1_width, int roi1_height, int roi2_x, int roi2_y, int roi2_width, int roi2_height, int minDisparity, int numberOfDisparities, int blockSize);
12163
12164    // C++:  void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
12165    private static native void validateDisparity_0(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities, int disp12MaxDisp);
12166    private static native void validateDisparity_1(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities);
12167
12168    // C++:  void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
12169    private static native void reprojectImageTo3D_0(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues, int ddepth);
12170    private static native void reprojectImageTo3D_1(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues);
12171    private static native void reprojectImageTo3D_2(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj);
12172
12173    // C++:  double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
12174    private static native double sampsonDistance_0(long pt1_nativeObj, long pt2_nativeObj, long F_nativeObj);
12175
12176    // C++:  int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
12177    private static native int estimateAffine3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence);
12178    private static native int estimateAffine3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold);
12179    private static native int estimateAffine3D_2(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj);
12180
12181    // C++:  Mat cv::estimateAffine3D(Mat src, Mat dst, double* scale = nullptr, bool force_rotation = true)
12182    private static native long estimateAffine3D_3(long src_nativeObj, long dst_nativeObj, double[] scale_out, boolean force_rotation);
12183    private static native long estimateAffine3D_4(long src_nativeObj, long dst_nativeObj, double[] scale_out);
12184    private static native long estimateAffine3D_5(long src_nativeObj, long dst_nativeObj);
12185
12186    // C++:  int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
12187    private static native int estimateTranslation3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence);
12188    private static native int estimateTranslation3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold);
12189    private static native int estimateTranslation3D_2(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj);
12190
12191    // C++:  Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
12192    private static native long estimateAffine2D_0(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters);
12193    private static native long estimateAffine2D_1(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence);
12194    private static native long estimateAffine2D_2(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters);
12195    private static native long estimateAffine2D_3(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold);
12196    private static native long estimateAffine2D_4(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method);
12197    private static native long estimateAffine2D_5(long from_nativeObj, long to_nativeObj, long inliers_nativeObj);
12198    private static native long estimateAffine2D_6(long from_nativeObj, long to_nativeObj);
12199
12200    // C++:  Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params)
12201    private static native long estimateAffine2D_7(long pts1_nativeObj, long pts2_nativeObj, long inliers_nativeObj, long params_nativeObj);
12202
12203    // C++:  Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
12204    private static native long estimateAffinePartial2D_0(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters);
12205    private static native long estimateAffinePartial2D_1(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence);
12206    private static native long estimateAffinePartial2D_2(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters);
12207    private static native long estimateAffinePartial2D_3(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold);
12208    private static native long estimateAffinePartial2D_4(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method);
12209    private static native long estimateAffinePartial2D_5(long from_nativeObj, long to_nativeObj, long inliers_nativeObj);
12210    private static native long estimateAffinePartial2D_6(long from_nativeObj, long to_nativeObj);
12211
12212    // C++:  int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
12213    private static native int decomposeHomographyMat_0(long H_nativeObj, long K_nativeObj, long rotations_mat_nativeObj, long translations_mat_nativeObj, long normals_mat_nativeObj);
12214
12215    // C++:  void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
12216    private static native void filterHomographyDecompByVisibleRefpoints_0(long rotations_mat_nativeObj, long normals_mat_nativeObj, long beforePoints_nativeObj, long afterPoints_nativeObj, long possibleSolutions_nativeObj, long pointsMask_nativeObj);
12217    private static native void filterHomographyDecompByVisibleRefpoints_1(long rotations_mat_nativeObj, long normals_mat_nativeObj, long beforePoints_nativeObj, long afterPoints_nativeObj, long possibleSolutions_nativeObj);
12218
12219    // C++:  void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
12220    private static native void undistort_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long newCameraMatrix_nativeObj);
12221    private static native void undistort_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);
12222
12223    // C++:  void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
12224    private static native void initUndistortRectifyMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);
12225
12226    // C++:  void cv::initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
12227    private static native void initInverseRectificationMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);
12228
12229    // C++:  Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
12230    private static native long getDefaultNewCameraMatrix_0(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height, boolean centerPrincipalPoint);
12231    private static native long getDefaultNewCameraMatrix_1(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height);
12232    private static native long getDefaultNewCameraMatrix_2(long cameraMatrix_nativeObj);
12233
12234    // C++:  void cv::undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
12235    private static native void undistortPoints_0(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj);
12236    private static native void undistortPoints_1(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj);
12237    private static native void undistortPoints_2(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);
12238
12239    // C++:  void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
12240    private static native void undistortPointsIter_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12241
12242    // C++:  void cv::undistortImagePoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1 = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 0.01))
12243    private static native void undistortImagePoints_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, int arg1_type, int arg1_maxCount, double arg1_epsilon);
12244    private static native void undistortImagePoints_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);
12245
12246    // C++:  void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
12247    private static native void fisheye_projectPoints_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj, double alpha, long jacobian_nativeObj);
12248    private static native void fisheye_projectPoints_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj, double alpha);
12249    private static native void fisheye_projectPoints_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj);
12250
12251    // C++:  void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
12252    private static native void fisheye_distortPoints_0(long undistorted_nativeObj, long distorted_nativeObj, long K_nativeObj, long D_nativeObj, double alpha);
12253    private static native void fisheye_distortPoints_1(long undistorted_nativeObj, long distorted_nativeObj, long K_nativeObj, long D_nativeObj);
12254
12255    // C++:  void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat(), TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8))
12256    private static native void fisheye_undistortPoints_0(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12257    private static native void fisheye_undistortPoints_1(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj);
12258    private static native void fisheye_undistortPoints_2(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj);
12259    private static native void fisheye_undistortPoints_3(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj);
12260
12261    // C++:  void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
12262    private static native void fisheye_initUndistortRectifyMap_0(long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);
12263
12264    // C++:  void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
12265    private static native void fisheye_undistortImage_0(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long Knew_nativeObj, double new_size_width, double new_size_height);
12266    private static native void fisheye_undistortImage_1(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long Knew_nativeObj);
12267    private static native void fisheye_undistortImage_2(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj);
12268
12269    // C++:  void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
12270    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_0(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance, double new_size_width, double new_size_height, double fov_scale);
12271    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_1(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance, double new_size_width, double new_size_height);
12272    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_2(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance);
12273    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_3(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj);
12274
12275    // C++:  double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
12276    private static native double fisheye_calibrate_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12277    private static native double fisheye_calibrate_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
12278    private static native double fisheye_calibrate_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);
12279
12280    // C++:  void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
12281    private static native void fisheye_stereoRectify_0(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height, double balance, double fov_scale);
12282    private static native void fisheye_stereoRectify_1(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height, double balance);
12283    private static native void fisheye_stereoRectify_2(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height);
12284    private static native void fisheye_stereoRectify_3(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags);
12285
12286    // C++:  double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, vector_Mat& rvecs, vector_Mat& tvecs, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
12287    private static native double fisheye_stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12288    private static native double fisheye_stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
12289    private static native double fisheye_stereoCalibrate_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);
12290
12291    // C++:  double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
12292    private static native double fisheye_stereoCalibrate_3(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
12293    private static native double fisheye_stereoCalibrate_4(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, int flags);
12294    private static native double fisheye_stereoCalibrate_5(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj);
12295
12296}