//
// This file is auto-generated. Please don't modify it!
//
package org.opencv.video;

import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Rect;
import org.opencv.core.RotatedRect;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;
import org.opencv.utils.Converters;
import org.opencv.video.BackgroundSubtractorKNN;
import org.opencv.video.BackgroundSubtractorMOG2;

// C++: class Video

public class Video {

    private static final int
            CV_LKFLOW_INITIAL_GUESSES = 4,
            CV_LKFLOW_GET_MIN_EIGENVALS = 8;


    // C++: enum <unnamed>
    public static final int
            OPTFLOW_USE_INITIAL_FLOW = 4,
            OPTFLOW_LK_GET_MIN_EIGENVALS = 8,
            OPTFLOW_FARNEBACK_GAUSSIAN = 256,
            MOTION_TRANSLATION = 0,
            MOTION_EUCLIDEAN = 1,
            MOTION_AFFINE = 2,
            MOTION_HOMOGRAPHY = 3;


    // C++: enum MODE (cv.detail.TrackerSamplerCSC.MODE)
    public static final int
            TrackerSamplerCSC_MODE_INIT_POS = 1,
            TrackerSamplerCSC_MODE_INIT_NEG = 2,
            TrackerSamplerCSC_MODE_TRACK_POS = 3,
            TrackerSamplerCSC_MODE_TRACK_NEG = 4,
            TrackerSamplerCSC_MODE_DETECT = 5;


    //
    // C++:  Ptr_BackgroundSubtractorMOG2 cv::createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16, bool detectShadows = true)
    //

    /**
     * Creates MOG2 Background Subtractor
     *
     * @param history Length of the history.
     * @param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
     * to decide whether a pixel is well described by the background model. This parameter does not
     * affect the background update.
     * @param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
     * speed a bit, so if you do not need this feature, set the parameter to false.
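     *
     * <p>A minimal usage sketch (assumes {@code frame} is a {@code Mat} grabbed from a video source;
     * by default shadows are marked with the value 127):</p>
     * <pre>{@code
     * BackgroundSubtractorMOG2 mog2 = Video.createBackgroundSubtractorMOG2(500, 16, true);
     * Mat fgMask = new Mat();
     * mog2.apply(frame, fgMask); // fgMask: 0 = background, 127 = shadow, 255 = foreground
     * }</pre>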
     * @return a new BackgroundSubtractorMOG2 instance
     */
    public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history, double varThreshold, boolean detectShadows) {
        return BackgroundSubtractorMOG2.__fromPtr__(createBackgroundSubtractorMOG2_0(history, varThreshold, detectShadows));
    }

    /**
     * Creates MOG2 Background Subtractor with {@code detectShadows} left at its default ({@code true}).
     *
     * @param history Length of the history.
     * @param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
     * to decide whether a pixel is well described by the background model. This parameter does not
     * affect the background update.
     * @return a new BackgroundSubtractorMOG2 instance
     */
    public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history, double varThreshold) {
        return BackgroundSubtractorMOG2.__fromPtr__(createBackgroundSubtractorMOG2_1(history, varThreshold));
    }

    /**
     * Creates MOG2 Background Subtractor with {@code varThreshold} and {@code detectShadows} left at
     * their defaults ({@code 16} and {@code true}).
     *
     * @param history Length of the history.
     * @return a new BackgroundSubtractorMOG2 instance
     */
    public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2(int history) {
        return BackgroundSubtractorMOG2.__fromPtr__(createBackgroundSubtractorMOG2_2(history));
    }

    /**
     * Creates MOG2 Background Subtractor with all parameters left at their defaults
     * ({@code history = 500}, {@code varThreshold = 16}, {@code detectShadows = true}).
     *
     * @return a new BackgroundSubtractorMOG2 instance
     */
    public static BackgroundSubtractorMOG2 createBackgroundSubtractorMOG2() {
        return BackgroundSubtractorMOG2.__fromPtr__(createBackgroundSubtractorMOG2_3());
    }


    //
    // C++:  Ptr_BackgroundSubtractorKNN cv::createBackgroundSubtractorKNN(int history = 500, double dist2Threshold = 400.0, bool detectShadows = true)
    //

    /**
     * Creates KNN Background Subtractor
     *
     * @param history Length of the history.
     * @param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide
     * whether a pixel is close to that sample. This parameter does not affect the background update.
     * @param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
     * speed a bit, so if you do not need this feature, set the parameter to false.
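     *
     * <p>A minimal usage sketch (assumes {@code frame} is a {@code Mat} grabbed from a video source):</p>
     * <pre>{@code
     * BackgroundSubtractorKNN knn = Video.createBackgroundSubtractorKNN(500, 400.0, true);
     * Mat fgMask = new Mat();
     * knn.apply(frame, fgMask); // foreground mask; shadows are marked if detectShadows is true
     * }</pre>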
     * @return a new BackgroundSubtractorKNN instance
     */
    public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history, double dist2Threshold, boolean detectShadows) {
        return BackgroundSubtractorKNN.__fromPtr__(createBackgroundSubtractorKNN_0(history, dist2Threshold, detectShadows));
    }

    /**
     * Creates KNN Background Subtractor with {@code detectShadows} left at its default ({@code true}).
     *
     * @param history Length of the history.
     * @param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide
     * whether a pixel is close to that sample. This parameter does not affect the background update.
     * @return a new BackgroundSubtractorKNN instance
     */
    public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history, double dist2Threshold) {
        return BackgroundSubtractorKNN.__fromPtr__(createBackgroundSubtractorKNN_1(history, dist2Threshold));
    }

    /**
     * Creates KNN Background Subtractor with {@code dist2Threshold} and {@code detectShadows} left at
     * their defaults ({@code 400.0} and {@code true}).
     *
     * @param history Length of the history.
     * @return a new BackgroundSubtractorKNN instance
     */
    public static BackgroundSubtractorKNN createBackgroundSubtractorKNN(int history) {
        return BackgroundSubtractorKNN.__fromPtr__(createBackgroundSubtractorKNN_2(history));
    }

    /**
     * Creates KNN Background Subtractor with all parameters left at their defaults
     * ({@code history = 500}, {@code dist2Threshold = 400.0}, {@code detectShadows = true}).
     *
     * @return a new BackgroundSubtractorKNN instance
     */
    public static BackgroundSubtractorKNN createBackgroundSubtractorKNN() {
        return BackgroundSubtractorKNN.__fromPtr__(createBackgroundSubtractorKNN_3());
    }


    //
    // C++:  RotatedRect cv::CamShift(Mat probImage, Rect& window, TermCriteria criteria)
    //

    /**
     * Finds an object center, size, and orientation.
     *
     * @param probImage Back projection of the object histogram. See calcBackProject.
     * @param window Initial search window.
     * @param criteria Stop criteria for the underlying meanShift.
     *
     * The function implements the CAMSHIFT object tracking algorithm CITE: Bradski98 . First, it finds an
     * object center using meanShift and then adjusts the window size and finds the optimal rotation. The
     * function returns the rotated rectangle structure that includes the object position, size, and
     * orientation. The next position of the search window can be obtained with RotatedRect::boundingRect().
     * (In old interfaces, the function instead returned the number of iterations CAMSHIFT took to converge.)
     *
     * See the OpenCV sample camshiftdemo.c that tracks colored objects.
     *
     * <b>Note:</b>
     * <ul>
     *   <li>
     *    (Python) A sample explaining the camshift tracking algorithm can be found at
     *     opencv_source_code/samples/python/camshift.py
     *   </li>
     * </ul>
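     *
     * <p>A minimal tracking-loop sketch (assumes {@code backProj} is the back projection from
     * calcBackProject and {@code track} holds the window from the previous frame):</p>
     * <pre>{@code
     * TermCriteria crit = new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1.0);
     * RotatedRect box = Video.CamShift(backProj, track, crit); // track is updated in place
     * }</pre>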
     * @return the rotated rectangle enclosing the tracked object
     */
    public static RotatedRect CamShift(Mat probImage, Rect window, TermCriteria criteria) {
        double[] window_out = new double[4];
        RotatedRect retVal = new RotatedRect(CamShift_0(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon));
        if(window!=null){ window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3]; }
        return retVal;
    }


    //
    // C++:  int cv::meanShift(Mat probImage, Rect& window, TermCriteria criteria)
    //

    /**
     * Finds an object on a back projection image.
     *
     * @param probImage Back projection of the object histogram. See calcBackProject for details.
     * @param window Initial search window.
     * @param criteria Stop criteria for the iterative search algorithm.
     *
     * The function implements the iterative object search algorithm. It takes the input back projection of
     * an object and the initial position. The mass center in window of the back projection image is
     * computed and the search window center shifts to the mass center. The procedure is repeated until the
     * specified number of iterations criteria.maxCount is done or until the window center shifts by less
     * than criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift , the search
     * window size and orientation do not change during the search. You can simply pass the output of
     * calcBackProject to this function. But better results can be obtained if you pre-filter the back
     * projection and remove the noise. For example, you can do this by retrieving connected components
     * with findContours , throwing away contours with small area ( contourArea ), and rendering the
     * remaining contours with drawContours.
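     *
     * <p>A minimal usage sketch (assumes {@code backProj} is a back projection image and
     * {@code window} holds the search window from the previous frame):</p>
     * <pre>{@code
     * TermCriteria crit = new TermCriteria(TermCriteria.EPS | TermCriteria.COUNT, 10, 1.0);
     * int iters = Video.meanShift(backProj, window, crit); // window is updated in place
     * }</pre>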
     * @return the number of iterations meanShift took to converge
     */
    public static int meanShift(Mat probImage, Rect window, TermCriteria criteria) {
        double[] window_out = new double[4];
        int retVal = meanShift_0(probImage.nativeObj, window.x, window.y, window.width, window.height, window_out, criteria.type, criteria.maxCount, criteria.epsilon);
        if(window!=null){ window.x = (int)window_out[0]; window.y = (int)window_out[1]; window.width = (int)window_out[2]; window.height = (int)window_out[3]; }
        return retVal;
    }


    //
    // C++:  int cv::buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true)
    //

    /**
     * Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
     *
     * @param img 8-bit input image.
     * @param pyramid output pyramid.
     * @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
     * calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
     * @param maxLevel 0-based maximal pyramid level number.
     * @param withDerivatives set to precompute gradients for every pyramid level. If pyramid is
     * constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.
     * @param pyrBorder the border mode for pyramid layers.
     * @param derivBorder the border mode for gradients.
     * @param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false
     * to force data copying.
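     *
     * <p>A minimal usage sketch (assumes {@code gray} is an 8-bit grayscale frame; the window size
     * must match the one later passed to calcOpticalFlowPyrLK):</p>
     * <pre>{@code
     * List<Mat> pyramid = new ArrayList<>();
     * int levels = Video.buildOpticalFlowPyramid(gray, pyramid, new Size(21, 21), 3);
     * }</pre>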
     * @return number of levels in constructed pyramid. Can be less than maxLevel.
     */
    public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage) {
        Mat pyramid_mat = new Mat();
        int retVal = buildOpticalFlowPyramid_0(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage);
        Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
        pyramid_mat.release();
        return retVal;
    }

    /**
     * Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
     *
     * @param img 8-bit input image.
     * @param pyramid output pyramid.
     * @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
     * calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
     * @param maxLevel 0-based maximal pyramid level number.
     * @param withDerivatives set to precompute gradients for every pyramid level. If pyramid is
     * constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.
     * @param pyrBorder the border mode for pyramid layers.
     * @param derivBorder the border mode for gradients.
     * @return number of levels in constructed pyramid. Can be less than maxLevel.
     */
    public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder) {
        Mat pyramid_mat = new Mat();
        int retVal = buildOpticalFlowPyramid_1(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder);
        Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
        pyramid_mat.release();
        return retVal;
    }

    /**
     * Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
     *
     * @param img 8-bit input image.
     * @param pyramid output pyramid.
     * @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
     * calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
     * @param maxLevel 0-based maximal pyramid level number.
     * @param withDerivatives set to precompute gradients for every pyramid level. If pyramid is
     * constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.
     * @param pyrBorder the border mode for pyramid layers.
     * @return number of levels in constructed pyramid. Can be less than maxLevel.
     */
    public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder) {
        Mat pyramid_mat = new Mat();
        int retVal = buildOpticalFlowPyramid_2(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder);
        Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
        pyramid_mat.release();
        return retVal;
    }

    /**
     * Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
     *
     * @param img 8-bit input image.
     * @param pyramid output pyramid.
     * @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
     * calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
     * @param maxLevel 0-based maximal pyramid level number.
     * @param withDerivatives set to precompute gradients for every pyramid level. If pyramid is
     * constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.
     * @return number of levels in constructed pyramid. Can be less than maxLevel.
     */
    public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, boolean withDerivatives) {
        Mat pyramid_mat = new Mat();
        int retVal = buildOpticalFlowPyramid_3(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives);
        Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
        pyramid_mat.release();
        return retVal;
    }

    /**
     * Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
     *
     * @param img 8-bit input image.
     * @param pyramid output pyramid.
     * @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
     * calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
     * @param maxLevel 0-based maximal pyramid level number.
     * @return number of levels in constructed pyramid. Can be less than maxLevel.
     */
    public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel) {
        Mat pyramid_mat = new Mat();
        int retVal = buildOpticalFlowPyramid_4(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel);
        Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
        pyramid_mat.release();
        return retVal;
    }


    //
    // C++:  void cv::calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, vector_Point2f prevPts, vector_Point2f& nextPts, vector_uchar& status, vector_float& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4)
    //

    /**
     * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
     * pyramids.
     *
     * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
     * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
     * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
     * single-precision floating-point numbers.
     * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
     * containing the calculated new positions of input features in the second image; when
     * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
     * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
     * the flow for the corresponding features has been found, otherwise, it is set to 0.
     * @param err output vector of errors; each element of the vector is set to an error for the
     * corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
     * found then the error is not defined (use the status parameter to find such cases).
     * @param winSize size of the search window at each pyramid level.
     * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
     * level), if set to 1, two levels are used, and so on; if pyramids are passed to input then
     * algorithm will use as many levels as pyramids have but no more than maxLevel.
     * @param criteria parameter, specifying the termination criteria of the iterative search algorithm
     * (after the specified maximum number of iterations criteria.maxCount or when the search window
     * moves by less than criteria.epsilon).
     * @param flags operation flags:
     * <ul>
     *   <li>
     *     <b>OPTFLOW_USE_INITIAL_FLOW</b> uses initial estimations, stored in nextPts; if the flag is
     *      not set, then prevPts is copied to nextPts and is considered the initial estimate.
     *   </li>
     *   <li>
     *     <b>OPTFLOW_LK_GET_MIN_EIGENVALS</b> use minimum eigen values as an error measure (see
     *      minEigThreshold description); if the flag is not set, then L1 distance between patches
     *      around the original and a moved point, divided by number of pixels in a window, is used as an
     *      error measure.
     *   </li>
     * </ul>
     * @param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of
     * optical flow equations (this matrix is called a spatial gradient matrix in CITE: Bouguet00), divided
     * by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding
     * feature is filtered out and its flow is not processed; this removes bad points and gives a
     * performance boost.
     *
     * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
     * CITE: Bouguet00 . The function is parallelized with the TBB library.
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/cpp/lkdemo.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/python/lk_track.py
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
     *     opencv_source_code/samples/python/lk_homography.py
     *   </li>
     * </ul>
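     *
     * <p>A minimal usage sketch (assumes {@code prevGray}/{@code gray} are consecutive grayscale
     * frames and that {@code prevPts} was filled beforehand, e.g. via Imgproc.goodFeaturesToTrack):</p>
     * <pre>{@code
     * MatOfPoint2f nextPts = new MatOfPoint2f();
     * MatOfByte status = new MatOfByte();
     * MatOfFloat err = new MatOfFloat();
     * Video.calcOpticalFlowPyrLK(prevGray, gray, prevPts, nextPts, status, err);
     * byte[] found = status.toArray(); // found[i] == 1 means point i was tracked
     * }</pre>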
     */
    public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold) {
        Mat prevPts_mat = prevPts;
        Mat nextPts_mat = nextPts;
        Mat status_mat = status;
        Mat err_mat = err;
        calcOpticalFlowPyrLK_0(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon, flags, minEigThreshold);
    }

    /**
     * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
     * pyramids.
     *
     * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
     * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
     * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
     * single-precision floating-point numbers.
     * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
     * containing the calculated new positions of input features in the second image; when
     * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
     * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
     * the flow for the corresponding features has been found, otherwise, it is set to 0.
     * @param err output vector of errors; each element of the vector is set to an error for the
     * corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
     * found then the error is not defined (use the status parameter to find such cases).
     * @param winSize size of the search window at each pyramid level.
     * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
     * level), if set to 1, two levels are used, and so on; if pyramids are passed to input then
     * algorithm will use as many levels as pyramids have but no more than maxLevel.
     * @param criteria parameter, specifying the termination criteria of the iterative search algorithm
     * (after the specified maximum number of iterations criteria.maxCount or when the search window
     * moves by less than criteria.epsilon).
     * @param flags operation flags:
     * <ul>
     *   <li>
     *     <b>OPTFLOW_USE_INITIAL_FLOW</b> uses initial estimations, stored in nextPts; if the flag is
     *      not set, then prevPts is copied to nextPts and is considered the initial estimate.
     *   </li>
     *   <li>
     *     <b>OPTFLOW_LK_GET_MIN_EIGENVALS</b> use minimum eigen values as an error measure (see
     *      minEigThreshold description); if the flag is not set, then L1 distance between patches
     *      around the original and a moved point, divided by number of pixels in a window, is used as an
     *      error measure.
     *   </li>
     * </ul>
     *
     * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
     * CITE: Bouguet00 . The function is parallelized with the TBB library.
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/cpp/lkdemo.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/python/lk_track.py
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
     *     opencv_source_code/samples/python/lk_homography.py
     *   </li>
     * </ul>
     */
    public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria, int flags) {
        Mat prevPts_mat = prevPts;
        Mat nextPts_mat = nextPts;
        Mat status_mat = status;
        Mat err_mat = err;
        calcOpticalFlowPyrLK_1(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon, flags);
    }

    /**
     * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
     * pyramids.
     *
     * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
     * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
     * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
     * single-precision floating-point numbers.
     * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
     * containing the calculated new positions of input features in the second image; when
     * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
     * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
     * the flow for the corresponding features has been found, otherwise, it is set to 0.
     * @param err output vector of errors; each element of the vector is set to an error for the
     * corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
     * found then the error is not defined (use the status parameter to find such cases).
     * @param winSize size of the search window at each pyramid level.
     * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
     * level), if set to 1, two levels are used, and so on; if pyramids are passed to input then
     * algorithm will use as many levels as pyramids have but no more than maxLevel.
     * @param criteria parameter, specifying the termination criteria of the iterative search algorithm
     * (after the specified maximum number of iterations criteria.maxCount or when the search window
     * moves by less than criteria.epsilon).
     *
     * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
     * CITE: Bouguet00 . The function is parallelized with the TBB library.
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/cpp/lkdemo.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/python/lk_track.py
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
     *     opencv_source_code/samples/python/lk_homography.py
     *   </li>
     * </ul>
     */
    public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria) {
        Mat prevPts_mat = prevPts;
        Mat nextPts_mat = nextPts;
        Mat status_mat = status;
        Mat err_mat = err;
        calcOpticalFlowPyrLK_2(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon);
    }

    /**
     * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
     * pyramids.
     *
     * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
     * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
     * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
     * single-precision floating-point numbers.
     * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
     * containing the calculated new positions of input features in the second image; when
     * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
     * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
     * the flow for the corresponding features has been found, otherwise, it is set to 0.
     * @param err output vector of errors; each element of the vector is set to an error for the
     * corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
     * found then the error is not defined (use the status parameter to find such cases).
     * @param winSize size of the search window at each pyramid level.
     * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
     * level), if set to 1, two levels are used, and so on; if pyramids are passed to input then
     * algorithm will use as many levels as pyramids have but no more than maxLevel.
     *
     * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
     * CITE: Bouguet00 . The function is parallelized with the TBB library.
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/cpp/lkdemo.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/python/lk_track.py
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
     *     opencv_source_code/samples/python/lk_homography.py
     *   </li>
     * </ul>
     */
    public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel) {
        Mat prevPts_mat = prevPts;
        Mat nextPts_mat = nextPts;
        Mat status_mat = status;
        Mat err_mat = err;
        calcOpticalFlowPyrLK_3(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel);
    }

    /**
     * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
     * pyramids.
     *
     * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
     * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
     * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
     * single-precision floating-point numbers.
     * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
     * containing the calculated new positions of input features in the second image; when
     * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
     * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
     * the flow for the corresponding features has been found, otherwise, it is set to 0.
     * @param err output vector of errors; each element of the vector is set to an error for the
     * corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
     * found then the error is not defined (use the status parameter to find such cases).
     * @param winSize size of the search window at each pyramid level.
     *
     * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
     * CITE: Bouguet00 . The function is parallelized with the TBB library.
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/cpp/lkdemo.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/python/lk_track.py
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
     *     opencv_source_code/samples/python/lk_homography.py
     *   </li>
     * </ul>
     */
    public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize) {
        Mat prevPts_mat = prevPts;
        Mat nextPts_mat = nextPts;
        Mat status_mat = status;
        Mat err_mat = err;
        calcOpticalFlowPyrLK_4(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height);
    }

    /**
     * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
     * pyramids.
     *
     * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
     * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
     * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
     * single-precision floating-point numbers.
     * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
     * containing the calculated new positions of input features in the second image; when
     * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
     * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
     * the flow for the corresponding features has been found, otherwise, it is set to 0.
     * @param err output vector of errors; each element of the vector is set to an error for the
     * corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't
     * found then the error is not defined (use the status parameter to find such cases).
     *
     * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
     * CITE: Bouguet00 . The function is parallelized with the TBB library.
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/cpp/lkdemo.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
     *     opencv_source_code/samples/python/lk_track.py
     *   </li>
     *   <li>
     *    (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
     *     opencv_source_code/samples/python/lk_homography.py
     *   </li>
     * </ul>
     */
    public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err) {
        Mat prevPts_mat = prevPts;
        Mat nextPts_mat = nextPts;
        Mat status_mat = status;
        Mat err_mat = err;
        calcOpticalFlowPyrLK_5(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj);
    }


    //
    // C++:  void cv::calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags)
    //

    /**
     * Computes a dense optical flow using Gunnar Farneback's algorithm.
     *
     * @param prev first 8-bit single-channel input image.
     * @param next second input image of the same size and the same type as prev.
     * @param flow computed flow image that has the same size as prev and type CV_32FC2.
     * @param pyr_scale parameter, specifying the image scale (&lt;1) to build pyramids for each image;
     * pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous
     * one.
     * @param levels number of pyramid layers including the initial image; levels=1 means that no extra
     * layers are created and only the original images are used.
     * @param winsize averaging window size; larger values increase the algorithm robustness to image
     * noise and give more chances for fast motion detection, but yield more blurred motion field.
     * @param iterations number of iterations the algorithm does at each pyramid level.
     * @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
     * larger values mean that the image will be approximated with smoother surfaces, yielding more
     * robust algorithm and more blurred motion field, typically poly_n = 5 or 7.
     * @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
     * basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a
     * good value would be poly_sigma=1.5.
     * @param flags operation flags that can be a combination of the following:
     * <ul>
     *   <li>
     *     <b>OPTFLOW_USE_INITIAL_FLOW</b> uses the input flow as an initial flow approximation.
     *   </li>
     *   <li>
     *     <b>OPTFLOW_FARNEBACK_GAUSSIAN</b> uses the Gaussian \(\texttt{winsize}\times\texttt{winsize}\)
     *      filter instead of a box filter of the same size for optical flow estimation; usually, this
     *      option gives a more accurate flow than with a box filter, at the cost of lower speed;
     *      normally, winsize for a Gaussian window should be set to a larger value to achieve the same
     *      level of robustness.
     *   </li>
     * </ul>
     *
     * The function finds an optical flow for each prev pixel using the CITE: Farneback2003 algorithm so that
     *
     * \(\texttt{prev} (y,x)  \sim \texttt{next} ( y + \texttt{flow} (y,x)[1],  x + \texttt{flow} (y,x)[0])\)
     *
     * <b>Note:</b>
     *
     * <ul>
     *   <li>
     *    An example using the optical flow algorithm described by Gunnar Farneback can be found at
     *     opencv_source_code/samples/cpp/fback.cpp
     *   </li>
     *   <li>
     *    (Python) An example using the optical flow algorithm described by Gunnar Farneback can be
     *     found at opencv_source_code/samples/python/opt_flow.py
     *   </li>
     * </ul>
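     *
     * <p>A minimal usage sketch with parameter values commonly seen in the OpenCV samples (assumes
     * {@code prevGray}/{@code gray} are consecutive grayscale frames):</p>
     * <pre>{@code
     * Mat flow = new Mat();
     * Video.calcOpticalFlowFarneback(prevGray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
     * // flow is CV_32FC2: a per-pixel (dx, dy) displacement field
     * }</pre>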
     */
    public static void calcOpticalFlowFarneback(Mat prev, Mat next, Mat flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) {
        calcOpticalFlowFarneback_0(prev.nativeObj, next.nativeObj, flow.nativeObj, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags);
    }


    //
    // C++:  double cv::computeECC(Mat templateImage, Mat inputImage, Mat inputMask = Mat())
    //

    /**
     * Computes the Enhanced Correlation Coefficient value between two images CITE: EP08 .
     *
     * @param templateImage single-channel template image; CV_8U or CV_32F array.
     * @param inputImage single-channel input image to be warped to provide an image similar to
     *  templateImage, same type as templateImage.
     * @param inputMask An optional mask to indicate valid values of inputImage.
     *
     * SEE:
     * findTransformECC
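     *
     * <p>A minimal usage sketch (assumes two single-channel images of the same type):</p>
     * <pre>{@code
     * double ecc = Video.computeECC(templateImage, inputImage, new Mat()); // empty Mat = no mask
     * }</pre>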
     * @return the ECC value between the two images
     */
    public static double computeECC(Mat templateImage, Mat inputImage, Mat inputMask) {
        return computeECC_0(templateImage.nativeObj, inputImage.nativeObj, inputMask.nativeObj);
    }

    /**
     * Computes the Enhanced Correlation Coefficient value between two images CITE: EP08 .
     *
     * @param templateImage single-channel template image; CV_8U or CV_32F array.
     * @param inputImage single-channel input image to be warped to provide an image similar to
     *  templateImage, same type as templateImage.
     *
     * SEE:
     * findTransformECC
     * @return the ECC value between the two images
     */
    public static double computeECC(Mat templateImage, Mat inputImage) {
        return computeECC_1(templateImage.nativeObj, inputImage.nativeObj);
    }


    //
    // C++:  double cv::findTransformECC(Mat templateImage, Mat inputImage, Mat& warpMatrix, int motionType, TermCriteria criteria, Mat inputMask, int gaussFiltSize)
    //

    /**
     * Finds the geometric transform (warp) between two images in terms of the ECC criterion CITE: EP08 .
     *
     * @param templateImage single-channel template image; CV_8U or CV_32F array.
     * @param inputImage single-channel input image which should be warped with the final warpMatrix in
     * order to provide an image similar to templateImage, same type as templateImage.
     * @param warpMatrix floating-point \(2\times 3\) or \(3\times 3\) mapping matrix (warp).
     * @param motionType parameter, specifying the type of motion:
     * <ul>
     *   <li>
     *     <b>MOTION_TRANSLATION</b> sets a translational motion model; warpMatrix is \(2\times 3\) with
     *      the first \(2\times 2\) part being the unity matrix and the rest two parameters being
     *      estimated.
     *   </li>
     *   <li>
     *     <b>MOTION_EUCLIDEAN</b> sets a Euclidean (rigid) transformation as motion model; three
     *      parameters are estimated; warpMatrix is \(2\times 3\).
     *   </li>
     *   <li>
     *     <b>MOTION_AFFINE</b> sets an affine motion model (DEFAULT); six parameters are estimated;
     *      warpMatrix is \(2\times 3\).
     *   </li>
     *   <li>
     *     <b>MOTION_HOMOGRAPHY</b> sets a homography as a motion model; eight parameters are
     *      estimated; {@code warpMatrix} is \(3\times 3\).
     *   </li>
     * </ul>
     * @param criteria parameter, specifying the termination criteria of the ECC algorithm;
     * criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
     * iterations (a negative criteria.epsilon makes criteria.maxCount the only termination criterion).
     * Default values are shown in the declaration above.
     * @param inputMask An optional mask to indicate valid values of inputImage.
     * @param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5)
     *
     * The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion
     * (CITE: EP08), that is
     *
     * \(\texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))\)
     *
     * where
     *
     * \(\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}\)
     *
     * (the equation holds with homogeneous coordinates for homography). It returns the final enhanced
     * correlation coefficient, that is the correlation coefficient between the template image and the
     * final warped input image. When a \(3\times 3\) matrix is given with motionType =0, 1 or 2, the third
     * row is ignored.
     *
     * Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
     * area-based alignment that builds on intensity similarities. In essence, the function updates the
     * initial transformation that roughly aligns the images. If this information is missing, the identity
     * warp (unity matrix) is used as an initialization. Note that if images undergo strong
     * displacements/rotations, an initial transformation that roughly aligns the images is necessary
     * (e.g., a simple euclidean/similarity transform that allows for the images showing the same image
     * content approximately). Use inverse warping in the second image to take an image close to the first
     * one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
     * sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
     * an exception if the algorithm does not converge.
     *
     * SEE:
     * computeECC, estimateAffine2D, estimateAffinePartial2D, findHomography
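     *
     * <p>A minimal alignment sketch (assumes single-channel {@code templateImage}/{@code inputImage};
     * {@code CvType} and {@code Imgproc} come from org.opencv.core and org.opencv.imgproc, which this
     * class does not import; the warp is initialized to identity and then applied with
     * WARP_INVERSE_MAP as described above):</p>
     * <pre>{@code
     * Mat warp = Mat.eye(2, 3, CvType.CV_32F); // MOTION_AFFINE expects a 2x3 CV_32F matrix
     * TermCriteria crit = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 50, 0.001);
     * double cc = Video.findTransformECC(templateImage, inputImage, warp,
     *         Video.MOTION_AFFINE, crit, new Mat(), 5);
     * Mat aligned = new Mat();
     * Imgproc.warpAffine(inputImage, aligned, warp, templateImage.size(),
     *         Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP);
     * }</pre>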
     * @return the final enhanced correlation coefficient, i.e. the correlation between the template image
     * and the final warped input image
     */
    public static double findTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix, int motionType, TermCriteria criteria, Mat inputMask, int gaussFiltSize) {
        return findTransformECC_0(templateImage.nativeObj, inputImage.nativeObj, warpMatrix.nativeObj, motionType, criteria.type, criteria.maxCount, criteria.epsilon, inputMask.nativeObj, gaussFiltSize);
    }


    //
    // C++:  double cv::findTransformECC(Mat templateImage, Mat inputImage, Mat& warpMatrix, int motionType = MOTION_AFFINE, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001), Mat inputMask = Mat())
    //

    public static double findTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix, int motionType, TermCriteria criteria, Mat inputMask) {
        return findTransformECC_1(templateImage.nativeObj, inputImage.nativeObj, warpMatrix.nativeObj, motionType, criteria.type, criteria.maxCount, criteria.epsilon, inputMask.nativeObj);
    }

    public static double findTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix, int motionType, TermCriteria criteria) {
        return findTransformECC_2(templateImage.nativeObj, inputImage.nativeObj, warpMatrix.nativeObj, motionType, criteria.type, criteria.maxCount, criteria.epsilon);
    }

    public static double findTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix, int motionType) {
        return findTransformECC_3(templateImage.nativeObj, inputImage.nativeObj, warpMatrix.nativeObj, motionType);
    }

    public static double findTransformECC(Mat templateImage, Mat inputImage, Mat warpMatrix) {
        return findTransformECC_4(templateImage.nativeObj, inputImage.nativeObj, warpMatrix.nativeObj);
    }


    //
    // C++:  Mat cv::readOpticalFlow(String path)
    //

    /**
     * Read a .flo file
     *
     *  @param path Path to the file to be loaded
     *
     *  The function readOpticalFlow loads a flow field from a file and returns it as a single matrix.
     *  The resulting Mat has type CV_32FC2 (floating-point, 2-channel). The first channel holds the
     *  flow in the horizontal direction (u), the second the flow in the vertical direction (v).
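     *
     *  <p>A minimal usage sketch (the file name is hypothetical):</p>
     *  <pre>{@code
     *  Mat flow = Video.readOpticalFlow("flow.flo");
     *  // flow.empty() indicates the file could not be read
     *  }</pre>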
     * @return the flow field as a CV_32FC2 matrix
     */
    public static Mat readOpticalFlow(String path) {
        return new Mat(readOpticalFlow_0(path));
    }


    //
    // C++:  bool cv::writeOpticalFlow(String path, Mat flow)
    //

    /**
     * Write a .flo to disk
     *
     *  @param path Path to the file to be written
     *  @param flow Flow field to be stored
     *
     *  The function stores a flow field in a file and returns true on success, false otherwise.
     *  The flow field must be a 2-channel, floating-point matrix (CV_32FC2). The first channel holds
     *  the flow in the horizontal direction (u), the second the flow in the vertical direction (v).
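     *
     *  <p>A minimal usage sketch (assumes {@code flow} is a CV_32FC2 field, e.g. from
     *  calcOpticalFlowFarneback; the file name is hypothetical):</p>
     *  <pre>{@code
     *  boolean ok = Video.writeOpticalFlow("flow.flo", flow);
     *  }</pre>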
     * @return true on success, false otherwise
     */
    public static boolean writeOpticalFlow(String path, Mat flow) {
        return writeOpticalFlow_0(path, flow.nativeObj);
    }




    // C++:  Ptr_BackgroundSubtractorMOG2 cv::createBackgroundSubtractorMOG2(int history = 500, double varThreshold = 16, bool detectShadows = true)
    private static native long createBackgroundSubtractorMOG2_0(int history, double varThreshold, boolean detectShadows);
    private static native long createBackgroundSubtractorMOG2_1(int history, double varThreshold);
    private static native long createBackgroundSubtractorMOG2_2(int history);
    private static native long createBackgroundSubtractorMOG2_3();

    // C++:  Ptr_BackgroundSubtractorKNN cv::createBackgroundSubtractorKNN(int history = 500, double dist2Threshold = 400.0, bool detectShadows = true)
    private static native long createBackgroundSubtractorKNN_0(int history, double dist2Threshold, boolean detectShadows);
    private static native long createBackgroundSubtractorKNN_1(int history, double dist2Threshold);
    private static native long createBackgroundSubtractorKNN_2(int history);
    private static native long createBackgroundSubtractorKNN_3();

    // C++:  RotatedRect cv::CamShift(Mat probImage, Rect& window, TermCriteria criteria)
    private static native double[] CamShift_0(long probImage_nativeObj, int window_x, int window_y, int window_width, int window_height, double[] window_out, int criteria_type, int criteria_maxCount, double criteria_epsilon);

    // C++:  int cv::meanShift(Mat probImage, Rect& window, TermCriteria criteria)
    private static native int meanShift_0(long probImage_nativeObj, int window_x, int window_y, int window_width, int window_height, double[] window_out, int criteria_type, int criteria_maxCount, double criteria_epsilon);

    // C++:  int cv::buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true)
    private static native int buildOpticalFlowPyramid_0(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage);
    private static native int buildOpticalFlowPyramid_1(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder);
    private static native int buildOpticalFlowPyramid_2(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, boolean withDerivatives, int pyrBorder);
    private static native int buildOpticalFlowPyramid_3(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, boolean withDerivatives);
    private static native int buildOpticalFlowPyramid_4(long img_nativeObj, long pyramid_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel);

    // C++:  void cv::calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, vector_Point2f prevPts, vector_Point2f& nextPts, vector_uchar& status, vector_float& err, Size winSize = Size(21,21), int maxLevel = 3, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), int flags = 0, double minEigThreshold = 1e-4)
    private static native void calcOpticalFlowPyrLK_0(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, int criteria_type, int criteria_maxCount, double criteria_epsilon, int flags, double minEigThreshold);
1013    private static native void calcOpticalFlowPyrLK_1(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, int criteria_type, int criteria_maxCount, double criteria_epsilon, int flags);
1014    private static native void calcOpticalFlowPyrLK_2(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel, int criteria_type, int criteria_maxCount, double criteria_epsilon);
1015    private static native void calcOpticalFlowPyrLK_3(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height, int maxLevel);
1016    private static native void calcOpticalFlowPyrLK_4(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj, double winSize_width, double winSize_height);
1017    private static native void calcOpticalFlowPyrLK_5(long prevImg_nativeObj, long nextImg_nativeObj, long prevPts_mat_nativeObj, long nextPts_mat_nativeObj, long status_mat_nativeObj, long err_mat_nativeObj);
1018
1019    // C++:  void cv::calcOpticalFlowFarneback(Mat prev, Mat next, Mat& flow, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags)
1020    private static native void calcOpticalFlowFarneback_0(long prev_nativeObj, long next_nativeObj, long flow_nativeObj, double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags);
1021
1022    // C++:  double cv::computeECC(Mat templateImage, Mat inputImage, Mat inputMask = Mat())
1023    private static native double computeECC_0(long templateImage_nativeObj, long inputImage_nativeObj, long inputMask_nativeObj);
1024    private static native double computeECC_1(long templateImage_nativeObj, long inputImage_nativeObj);
1025
1026    // C++:  double cv::findTransformECC(Mat templateImage, Mat inputImage, Mat& warpMatrix, int motionType, TermCriteria criteria, Mat inputMask, int gaussFiltSize)
1027    private static native double findTransformECC_0(long templateImage_nativeObj, long inputImage_nativeObj, long warpMatrix_nativeObj, int motionType, int criteria_type, int criteria_maxCount, double criteria_epsilon, long inputMask_nativeObj, int gaussFiltSize);
1028
1029    // C++:  double cv::findTransformECC(Mat templateImage, Mat inputImage, Mat& warpMatrix, int motionType = MOTION_AFFINE, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001), Mat inputMask = Mat())
1030    private static native double findTransformECC_1(long templateImage_nativeObj, long inputImage_nativeObj, long warpMatrix_nativeObj, int motionType, int criteria_type, int criteria_maxCount, double criteria_epsilon, long inputMask_nativeObj);
1031    private static native double findTransformECC_2(long templateImage_nativeObj, long inputImage_nativeObj, long warpMatrix_nativeObj, int motionType, int criteria_type, int criteria_maxCount, double criteria_epsilon);
1032    private static native double findTransformECC_3(long templateImage_nativeObj, long inputImage_nativeObj, long warpMatrix_nativeObj, int motionType);
1033    private static native double findTransformECC_4(long templateImage_nativeObj, long inputImage_nativeObj, long warpMatrix_nativeObj);
1034
1035    // C++:  Mat cv::readOpticalFlow(String path)
1036    private static native long readOpticalFlow_0(String path);
1037
1038    // C++:  bool cv::writeOpticalFlow(String path, Mat flow)
1039    private static native boolean writeOpticalFlow_0(String path, long flow_nativeObj);
1040
1041}