001//
002// This file is auto-generated. Please don't modify it!
003//
004package org.opencv.ml;
005
006import java.util.ArrayList;
007import java.util.List;
008import org.opencv.core.Mat;
009import org.opencv.core.TermCriteria;
012import org.opencv.utils.Converters;
013
014// C++: class EM
015/**
016 * The class implements the Expectation Maximization algorithm.
017 *
018 * SEE: REF: ml_intro_em
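 *
 * <p>A minimal usage sketch, not part of the upstream documentation; it assumes the
 * OpenCV native library is already loaded and that {@code org.opencv.core.Core} and
 * {@code org.opencv.core.CvType} are available:</p>
 * <pre>{@code
 * // Toy data: 100 two-dimensional samples, one sample per row.
 * Mat samples = new Mat(100, 2, CvType.CV_64FC1);
 * Core.randn(samples, 0.0, 1.0);
 *
 * EM em = EM.create();
 * em.setClustersNumber(2);
 * em.setCovarianceMatrixType(EM.COV_MAT_DIAGONAL);
 * boolean trained = em.trainEM(samples);
 * }</pre>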
019 */
020public class EM extends StatModel {
021
022    protected EM(long addr) { super(addr); }
023
024    // internal usage only
025    public static EM __fromPtr__(long addr) { return new EM(addr); }
026
027    // C++: enum <unnamed>
028    public static final int
029            DEFAULT_NCLUSTERS = 5,
030            DEFAULT_MAX_ITERS = 100,
031            START_E_STEP = 1,
032            START_M_STEP = 2,
033            START_AUTO_STEP = 0;
034
035
036    // C++: enum Types (cv.ml.EM.Types)
037    public static final int
038            COV_MAT_SPHERICAL = 0,
039            COV_MAT_DIAGONAL = 1,
040            COV_MAT_GENERIC = 2,
041            COV_MAT_DEFAULT = COV_MAT_DIAGONAL;
042
043
044    //
045    // C++:  int cv::ml::EM::getClustersNumber()
046    //
047
048    /**
049     * SEE: setClustersNumber
050     * @return automatically generated
051     */
052    public int getClustersNumber() {
053        return getClustersNumber_0(nativeObj);
054    }
055
056
057    //
058    // C++:  void cv::ml::EM::setClustersNumber(int val)
059    //
060
061    /**
     * SEE: getClustersNumber
063     * @param val automatically generated
064     */
065    public void setClustersNumber(int val) {
066        setClustersNumber_0(nativeObj, val);
067    }
068
069
070    //
071    // C++:  int cv::ml::EM::getCovarianceMatrixType()
072    //
073
074    /**
075     * SEE: setCovarianceMatrixType
076     * @return automatically generated
077     */
078    public int getCovarianceMatrixType() {
079        return getCovarianceMatrixType_0(nativeObj);
080    }
081
082
083    //
084    // C++:  void cv::ml::EM::setCovarianceMatrixType(int val)
085    //
086
087    /**
     * SEE: getCovarianceMatrixType
089     * @param val automatically generated
090     */
091    public void setCovarianceMatrixType(int val) {
092        setCovarianceMatrixType_0(nativeObj, val);
093    }
094
095
096    //
097    // C++:  TermCriteria cv::ml::EM::getTermCriteria()
098    //
099
100    /**
101     * SEE: setTermCriteria
102     * @return automatically generated
103     */
104    public TermCriteria getTermCriteria() {
105        return new TermCriteria(getTermCriteria_0(nativeObj));
106    }
107
108
109    //
110    // C++:  void cv::ml::EM::setTermCriteria(TermCriteria val)
111    //
112
113    /**
     * SEE: getTermCriteria
115     * @param val automatically generated
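     *
     * <p>A hedged example of building the criteria with the {@code org.opencv.core.TermCriteria}
     * constructor; the concrete limits below are illustrative only:</p>
     * <pre>{@code
     * // Stop after at most 100 iterations or once the improvement drops below 1e-6.
     * em.setTermCriteria(new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6));
     * }</pre>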
116     */
117    public void setTermCriteria(TermCriteria val) {
118        setTermCriteria_0(nativeObj, val.type, val.maxCount, val.epsilon);
119    }
120
121
122    //
123    // C++:  Mat cv::ml::EM::getWeights()
124    //
125
126    /**
127     * Returns weights of the mixtures
128     *
     *     Returns a vector with the number of elements equal to the number of mixtures.
130     * @return automatically generated
131     */
132    public Mat getWeights() {
133        return new Mat(getWeights_0(nativeObj));
134    }
135
136
137    //
138    // C++:  Mat cv::ml::EM::getMeans()
139    //
140
141    /**
142     * Returns the cluster centers (means of the Gaussian mixture)
143     *
     *     Returns a matrix with the number of rows equal to the number of mixtures and the number of
     *     columns equal to the space dimensionality.
146     * @return automatically generated
147     */
148    public Mat getMeans() {
149        return new Mat(getMeans_0(nativeObj));
150    }
151
152
153    //
154    // C++:  void cv::ml::EM::getCovs(vector_Mat& covs)
155    //
156
157    /**
     * Returns covariance matrices
     *
     *     Returns a vector of covariance matrices. The number of matrices equals the number of Gaussian
     *     mixtures; each matrix is a square floating-point matrix of \(N \times N\) size, where \(N\) is
     *     the space dimensionality.
162     * @param covs automatically generated
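     *
     * <p>A short sketch of reading the mixture parameters after training; it assumes
     * {@code em} has been trained as in the class-level example and that
     * {@code java.util.ArrayList} is imported:</p>
     * <pre>{@code
     * Mat weights = em.getWeights();   // one weight per mixture component
     * Mat means = em.getMeans();       // one row per mixture component
     * List<Mat> covs = new ArrayList<>();
     * em.getCovs(covs);                // nclusters matrices, each dims x dims
     * }</pre>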
163     */
164    public void getCovs(List<Mat> covs) {
165        Mat covs_mat = new Mat();
166        getCovs_0(nativeObj, covs_mat.nativeObj);
167        Converters.Mat_to_vector_Mat(covs_mat, covs);
168        covs_mat.release();
169    }
170
171
172    //
173    // C++:  float cv::ml::EM::predict(Mat samples, Mat& results = Mat(), int flags = 0)
174    //
175
176    /**
177     * Returns posterior probabilities for the provided samples
178     *
179     *     @param samples The input samples, floating-point matrix
     *     @param results The optional output \(nSamples \times nClusters\) matrix of results. It contains
181     *     posterior probabilities for each sample from the input
182     *     @param flags This parameter will be ignored
183     * @return automatically generated
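     *
     * <p>A minimal sketch; it assumes a trained {@code em} and passes {@code flags = 0},
     * which this method ignores anyway:</p>
     * <pre>{@code
     * Mat results = new Mat();
     * em.predict(samples, results, 0);   // results: posterior probabilities, one row per sample
     * }</pre>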
184     */
185    public float predict(Mat samples, Mat results, int flags) {
186        return predict_0(nativeObj, samples.nativeObj, results.nativeObj, flags);
187    }
188
189    /**
190     * Returns posterior probabilities for the provided samples
191     *
192     *     @param samples The input samples, floating-point matrix
     *     @param results The optional output \(nSamples \times nClusters\) matrix of results. It contains
194     *     posterior probabilities for each sample from the input
195     * @return automatically generated
196     */
197    public float predict(Mat samples, Mat results) {
198        return predict_1(nativeObj, samples.nativeObj, results.nativeObj);
199    }
200
201    /**
202     * Returns posterior probabilities for the provided samples
203     *
204     *     @param samples The input samples, floating-point matrix
206     * @return automatically generated
207     */
208    public float predict(Mat samples) {
209        return predict_2(nativeObj, samples.nativeObj);
210    }
211
212
213    //
214    // C++:  Vec2d cv::ml::EM::predict2(Mat sample, Mat& probs)
215    //
216
217    /**
218     * Returns a likelihood logarithm value and an index of the most probable mixture component
219     *     for the given sample.
220     *
221     *     @param sample A sample for classification. It should be a one-channel matrix of
222     *         \(1 \times dims\) or \(dims \times 1\) size.
223     *     @param probs Optional output matrix that contains posterior probabilities of each component
224     *         given the sample. It has \(1 \times nclusters\) size and CV_64FC1 type.
225     *
     *     The method returns a two-element double vector: the zeroth element is the likelihood logarithm
     *     value for the sample, and the first element is the index of the most probable mixture component
     *     for the given sample.
229     * @return automatically generated
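     *
     * <p>A minimal sketch; it assumes a trained {@code em} and a single one-channel
     * {@code 1 x dims} sample:</p>
     * <pre>{@code
     * Mat probs = new Mat();
     * double[] res = em.predict2(sample, probs);
     * double logLikelihood = res[0];
     * int bestComponent = (int) res[1];
     * }</pre>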
230     */
231    public double[] predict2(Mat sample, Mat probs) {
232        return predict2_0(nativeObj, sample.nativeObj, probs.nativeObj);
233    }
234
235
236    //
237    // C++:  bool cv::ml::EM::trainEM(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
238    //
239
240    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. Initial values of the model parameters will be
     *     estimated by the k-means algorithm.
     *
     *     Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
     *     responses (class labels or function values) as input. Instead, it computes the *Maximum
     *     Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
     *     parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
     *     covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
251     *     sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
252     *     probable mixture component for each sample).
253     *
254     *     The trained model can be used further for prediction, just like any other classifier. The
255     *     trained model is similar to the NormalBayesClassifier.
256     *
257     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
258     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
259     *         it will be converted to the inner matrix of such type for the further computing.
260     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
261     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
262     *     @param labels The optional output "class label" for each sample:
263     *         \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
264     *         mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
265     *     @param probs The optional output matrix that contains posterior probabilities of each Gaussian
     *         mixture component given each sample. It has \(nsamples \times nclusters\) size and
267     *         CV_64FC1 type.
268     * @return automatically generated
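     *
     * <p>A minimal sketch; {@code samples} is assumed to hold one observation per row,
     * as described above:</p>
     * <pre>{@code
     * Mat logLikelihoods = new Mat(), labels = new Mat(), probs = new Mat();
     * if (em.trainEM(samples, logLikelihoods, labels, probs)) {
     *     // labels: nsamples x 1 CV_32SC1, most probable component per sample
     *     // probs:  nsamples x nclusters CV_64FC1 posteriors
     * }
     * }</pre>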
269     */
270    public boolean trainEM(Mat samples, Mat logLikelihoods, Mat labels, Mat probs) {
271        return trainEM_0(nativeObj, samples.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj);
272    }
273
274    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. Initial values of the model parameters will be
     *     estimated by the k-means algorithm.
     *
     *     Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
     *     responses (class labels or function values) as input. Instead, it computes the *Maximum
     *     Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
     *     parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
     *     covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
285     *     sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
286     *     probable mixture component for each sample).
287     *
288     *     The trained model can be used further for prediction, just like any other classifier. The
289     *     trained model is similar to the NormalBayesClassifier.
290     *
291     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
292     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
293     *         it will be converted to the inner matrix of such type for the further computing.
294     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
295     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
296     *     @param labels The optional output "class label" for each sample:
297     *         \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
298     *         mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
301     * @return automatically generated
302     */
303    public boolean trainEM(Mat samples, Mat logLikelihoods, Mat labels) {
304        return trainEM_1(nativeObj, samples.nativeObj, logLikelihoods.nativeObj, labels.nativeObj);
305    }
306
307    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. Initial values of the model parameters will be
     *     estimated by the k-means algorithm.
     *
     *     Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
     *     responses (class labels or function values) as input. Instead, it computes the *Maximum
     *     Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
     *     parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
     *     covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
318     *     sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
319     *     probable mixture component for each sample).
320     *
321     *     The trained model can be used further for prediction, just like any other classifier. The
322     *     trained model is similar to the NormalBayesClassifier.
323     *
324     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
325     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
326     *         it will be converted to the inner matrix of such type for the further computing.
327     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
328     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
333     * @return automatically generated
334     */
335    public boolean trainEM(Mat samples, Mat logLikelihoods) {
336        return trainEM_2(nativeObj, samples.nativeObj, logLikelihoods.nativeObj);
337    }
338
339    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. Initial values of the model parameters will be
     *     estimated by the k-means algorithm.
     *
     *     Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
     *     responses (class labels or function values) as input. Instead, it computes the *Maximum
     *     Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
     *     parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
     *     covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
350     *     sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
351     *     probable mixture component for each sample).
352     *
353     *     The trained model can be used further for prediction, just like any other classifier. The
354     *     trained model is similar to the NormalBayesClassifier.
355     *
356     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
357     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
358     *         it will be converted to the inner matrix of such type for the further computing.
364     * @return automatically generated
365     */
366    public boolean trainEM(Mat samples) {
367        return trainEM_3(nativeObj, samples.nativeObj);
368    }
369
370
371    //
372    // C++:  bool cv::ml::EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
373    //
374
375    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
379     *     mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
380     *     \(S_k\) of mixture components.
381     *
382     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
383     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
384     *         it will be converted to the inner matrix of such type for the further computing.
385     *     @param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
386     *         \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
387     *         converted to the inner matrix of such type for the further computing.
388     *     @param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
389     *         covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
390     *         do not have CV_64F type they will be converted to the inner matrices of such type for the
391     *         further computing.
392     *     @param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
393     *         floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
394     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
395     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
396     *     @param labels The optional output "class label" for each sample:
397     *         \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
398     *         mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
399     *     @param probs The optional output matrix that contains posterior probabilities of each Gaussian
     *         mixture component given each sample. It has \(nsamples \times nclusters\) size and
401     *         CV_64FC1 type.
402     * @return automatically generated
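     *
     * <p>A minimal sketch of seeding the Expectation step with initial means for two
     * components in two dimensions; the values are purely illustrative:</p>
     * <pre>{@code
     * Mat means0 = new Mat(2, 2, CvType.CV_64FC1);
     * means0.put(0, 0, -1.0, -1.0,   // initial mean of component 0
     *                   1.0,  1.0);  // initial mean of component 1
     * boolean ok = em.trainE(samples, means0);
     * }</pre>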
403     */
404    public boolean trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods, Mat labels, Mat probs) {
405        return trainE_0(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj);
406    }
407
408    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
412     *     mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
413     *     \(S_k\) of mixture components.
414     *
415     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
416     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
417     *         it will be converted to the inner matrix of such type for the further computing.
418     *     @param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
419     *         \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
420     *         converted to the inner matrix of such type for the further computing.
421     *     @param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
422     *         covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
423     *         do not have CV_64F type they will be converted to the inner matrices of such type for the
424     *         further computing.
425     *     @param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
426     *         floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
427     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
428     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
429     *     @param labels The optional output "class label" for each sample:
430     *         \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
431     *         mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
434     * @return automatically generated
435     */
436    public boolean trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods, Mat labels) {
437        return trainE_1(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj);
438    }
439
440    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
444     *     mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
445     *     \(S_k\) of mixture components.
446     *
447     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
448     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
449     *         it will be converted to the inner matrix of such type for the further computing.
450     *     @param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
451     *         \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
452     *         converted to the inner matrix of such type for the further computing.
453     *     @param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
454     *         covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
455     *         do not have CV_64F type they will be converted to the inner matrices of such type for the
456     *         further computing.
457     *     @param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
458     *         floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
459     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
460     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
465     * @return automatically generated
466     */
467    public boolean trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods) {
468        return trainE_2(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj);
469    }
470
471    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
475     *     mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
476     *     \(S_k\) of mixture components.
477     *
478     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
479     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
480     *         it will be converted to the inner matrix of such type for the further computing.
481     *     @param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
482     *         \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
483     *         converted to the inner matrix of such type for the further computing.
484     *     @param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
485     *         covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
486     *         do not have CV_64F type they will be converted to the inner matrices of such type for the
487     *         further computing.
488     *     @param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
489     *         floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
495     * @return automatically generated
496     */
497    public boolean trainE(Mat samples, Mat means0, Mat covs0, Mat weights0) {
498        return trainE_3(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj);
499    }
500
501    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
505     *     mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
506     *     \(S_k\) of mixture components.
507     *
508     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
509     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
510     *         it will be converted to the inner matrix of such type for the further computing.
511     *     @param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
512     *         \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
513     *         converted to the inner matrix of such type for the further computing.
514     *     @param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
515     *         covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
516     *         do not have CV_64F type they will be converted to the inner matrices of such type for the
517     *         further computing.
524     * @return automatically generated
525     */
526    public boolean trainE(Mat samples, Mat means0, Mat covs0) {
527        return trainE_4(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj);
528    }
529
530    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
534     *     mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
535     *     \(S_k\) of mixture components.
536     *
537     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
538     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
539     *         it will be converted to the inner matrix of such type for the further computing.
540     *     @param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
541     *         \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
542     *         converted to the inner matrix of such type for the further computing.
552     * @return automatically generated
553     */
554    public boolean trainE(Mat samples, Mat means0) {
555        return trainE_5(nativeObj, samples.nativeObj, means0.nativeObj);
556    }
557
558
559    //
560    // C++:  bool cv::ml::EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
561    //
562
563    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Maximization step. You need to provide initial probabilities
567     *     \(p_{i,k}\) to use this option.
568     *
569     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
570     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
571     *         it will be converted to the inner matrix of such type for the further computing.
     *     @param probs0 Initial probabilities \(p_{i,k}\) of each sample belonging to each mixture
     *         component; a one-channel floating-point matrix of \(nsamples \times nclusters\) size.
573     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
574     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
575     *     @param labels The optional output "class label" for each sample:
576     *         \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
577     *         mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
578     *     @param probs The optional output matrix that contains posterior probabilities of each Gaussian
     *         mixture component given each sample. It has \(nsamples \times nclusters\) size and
580     *         CV_64FC1 type.
581     * @return automatically generated
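     *
     * <p>A minimal sketch of seeding the Maximization step; it assumes {@code probs0}
     * is an {@code nsamples x nclusters} floating-point matrix whose rows sum to 1,
     * for example the {@code probs} output of a previous run:</p>
     * <pre>{@code
     * boolean ok = em.trainM(samples, probs0);
     * }</pre>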
582     */
583    public boolean trainM(Mat samples, Mat probs0, Mat logLikelihoods, Mat labels, Mat probs) {
584        return trainM_0(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj);
585    }
586
587    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Maximization step. You need to provide initial probabilities
591     *     \(p_{i,k}\) to use this option.
592     *
593     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
594     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
595     *         it will be converted to the inner matrix of such type for the further computing.
     *     @param probs0 Initial probabilities \(p_{i,k}\) of each sample belonging to each mixture
     *         component; a one-channel floating-point matrix of \(nsamples \times nclusters\) size.
597     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
598     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
599     *     @param labels The optional output "class label" for each sample:
600     *         \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
601     *         mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
604     * @return automatically generated
605     */
606    public boolean trainM(Mat samples, Mat probs0, Mat logLikelihoods, Mat labels) {
607        return trainM_1(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj);
608    }
609
610    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Maximization step. You need to provide initial probabilities
614     *     \(p_{i,k}\) to use this option.
615     *
616     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
617     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
618     *         it will be converted to the inner matrix of such type for the further computing.
     *     @param probs0 Initial probabilities \(p_{i,k}\) of each sample belonging to each mixture
     *         component; a one-channel floating-point matrix of \(nsamples \times nclusters\) size.
620     *     @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
621     *         each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
626     * @return automatically generated
627     */
628    public boolean trainM(Mat samples, Mat probs0, Mat logLikelihoods) {
629        return trainM_2(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj);
630    }
631
632    /**
     * Estimate the Gaussian mixture parameters from a set of samples.
     *
     *     This variation starts with the Maximization step. You need to provide initial probabilities
636     *     \(p_{i,k}\) to use this option.
637     *
638     *     @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
639     *         one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
640     *         it will be converted to the inner matrix of such type for the further computing.
     *     @param probs0 Initial probabilities \(p_{i,k}\) of each sample belonging to each mixture
     *         component; a one-channel floating-point matrix of \(nsamples \times nclusters\) size.
647     * @return automatically generated
648     */
649    public boolean trainM(Mat samples, Mat probs0) {
650        return trainM_3(nativeObj, samples.nativeObj, probs0.nativeObj);
651    }
652
653
654    //
655    // C++: static Ptr_EM cv::ml::EM::create()
656    //
657
658    /**
     * Creates an empty EM model.
     *     The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
     *     can use one of the EM::train* methods or load it from a file using Algorithm::load&lt;EM&gt;(filename).
662     * @return automatically generated
663     */
664    public static EM create() {
665        return EM.__fromPtr__(create_0());
666    }
667
668
669    //
670    // C++: static Ptr_EM cv::ml::EM::load(String filepath, String nodeName = String())
671    //
672
673    /**
674     * Loads and creates a serialized EM from a file
675     *
     * Use EM::save to serialize and store an EM to disk.
     * Load the EM from this file again by calling this function with the path to the file.
     * Optionally specify the node in the file that contains the classifier.
679     *
680     * @param filepath path to serialized EM
681     * @param nodeName name of node containing the classifier
682     * @return automatically generated
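     *
     * <p>A minimal round-trip sketch; the file name {@code "em_model.yml"} is an
     * illustrative placeholder:</p>
     * <pre>{@code
     * em.save("em_model.yml");               // save() is inherited from Algorithm
     * EM restored = EM.load("em_model.yml");
     * }</pre>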
683     */
684    public static EM load(String filepath, String nodeName) {
685        return EM.__fromPtr__(load_0(filepath, nodeName));
686    }
687
688    /**
689     * Loads and creates a serialized EM from a file
690     *
     * Use EM::save to serialize and store an EM to disk.
     * Load the EM from this file again by calling this function with the path to the file.
     * Optionally specify the node in the file that contains the classifier.
694     *
695     * @param filepath path to serialized EM
696     * @return automatically generated
697     */
698    public static EM load(String filepath) {
699        return EM.__fromPtr__(load_1(filepath));
700    }
701
702
703    @Override
704    protected void finalize() throws Throwable {
705        delete(nativeObj);
706    }
707
708
709
710    // C++:  int cv::ml::EM::getClustersNumber()
711    private static native int getClustersNumber_0(long nativeObj);
712
713    // C++:  void cv::ml::EM::setClustersNumber(int val)
714    private static native void setClustersNumber_0(long nativeObj, int val);
715
716    // C++:  int cv::ml::EM::getCovarianceMatrixType()
717    private static native int getCovarianceMatrixType_0(long nativeObj);
718
719    // C++:  void cv::ml::EM::setCovarianceMatrixType(int val)
720    private static native void setCovarianceMatrixType_0(long nativeObj, int val);
721
722    // C++:  TermCriteria cv::ml::EM::getTermCriteria()
723    private static native double[] getTermCriteria_0(long nativeObj);
724
725    // C++:  void cv::ml::EM::setTermCriteria(TermCriteria val)
726    private static native void setTermCriteria_0(long nativeObj, int val_type, int val_maxCount, double val_epsilon);
727
728    // C++:  Mat cv::ml::EM::getWeights()
729    private static native long getWeights_0(long nativeObj);
730
731    // C++:  Mat cv::ml::EM::getMeans()
732    private static native long getMeans_0(long nativeObj);
733
734    // C++:  void cv::ml::EM::getCovs(vector_Mat& covs)
735    private static native void getCovs_0(long nativeObj, long covs_mat_nativeObj);
736
737    // C++:  float cv::ml::EM::predict(Mat samples, Mat& results = Mat(), int flags = 0)
738    private static native float predict_0(long nativeObj, long samples_nativeObj, long results_nativeObj, int flags);
739    private static native float predict_1(long nativeObj, long samples_nativeObj, long results_nativeObj);
740    private static native float predict_2(long nativeObj, long samples_nativeObj);
741
742    // C++:  Vec2d cv::ml::EM::predict2(Mat sample, Mat& probs)
743    private static native double[] predict2_0(long nativeObj, long sample_nativeObj, long probs_nativeObj);
744
745    // C++:  bool cv::ml::EM::trainEM(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
746    private static native boolean trainEM_0(long nativeObj, long samples_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj);
747    private static native boolean trainEM_1(long nativeObj, long samples_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj);
748    private static native boolean trainEM_2(long nativeObj, long samples_nativeObj, long logLikelihoods_nativeObj);
749    private static native boolean trainEM_3(long nativeObj, long samples_nativeObj);
750
751    // C++:  bool cv::ml::EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
752    private static native boolean trainE_0(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj, long weights0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj);
753    private static native boolean trainE_1(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj, long weights0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj);
754    private static native boolean trainE_2(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj, long weights0_nativeObj, long logLikelihoods_nativeObj);
755    private static native boolean trainE_3(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj, long weights0_nativeObj);
756    private static native boolean trainE_4(long nativeObj, long samples_nativeObj, long means0_nativeObj, long covs0_nativeObj);
757    private static native boolean trainE_5(long nativeObj, long samples_nativeObj, long means0_nativeObj);
758
759    // C++:  bool cv::ml::EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
760    private static native boolean trainM_0(long nativeObj, long samples_nativeObj, long probs0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj, long probs_nativeObj);
761    private static native boolean trainM_1(long nativeObj, long samples_nativeObj, long probs0_nativeObj, long logLikelihoods_nativeObj, long labels_nativeObj);
762    private static native boolean trainM_2(long nativeObj, long samples_nativeObj, long probs0_nativeObj, long logLikelihoods_nativeObj);
763    private static native boolean trainM_3(long nativeObj, long samples_nativeObj, long probs0_nativeObj);
764
765    // C++: static Ptr_EM cv::ml::EM::create()
766    private static native long create_0();
767
768    // C++: static Ptr_EM cv::ml::EM::load(String filepath, String nodeName = String())
769    private static native long load_0(String filepath, String nodeName);
770    private static native long load_1(String filepath);
771
772    // native support for java finalize()
773    private static native void delete(long nativeObj);
774
775}