001//
002// This file is auto-generated. Please don't modify it!
003//
004package org.opencv.dnn;
005
006import java.util.ArrayList;
007import java.util.List;
008import org.opencv.core.Mat;
009import org.opencv.core.MatOfByte;
010import org.opencv.core.MatOfDouble;
011import org.opencv.core.MatOfFloat;
012import org.opencv.core.MatOfInt;
013import org.opencv.core.Scalar;
014import org.opencv.dnn.DictValue;
015import org.opencv.dnn.Layer;
016import org.opencv.dnn.Net;
017import org.opencv.utils.Converters;
018
019// C++: class Net
020/**
 * This class allows creating and manipulating comprehensive artificial neural networks.
 *
 * A neural network is represented as a directed acyclic graph (DAG), whose vertices are
 * Layer instances and whose edges specify the relationships between layer inputs and outputs.
 *
 * Each network layer has a unique integer id and a unique string name inside its network.
 * A LayerId can store either the layer name or the layer id.
 *
 * This class supports reference counting of its instances, i.e. copies point to the same instance.
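 *
 * A minimal usage sketch (the ONNX model path is a placeholder):
 * <pre>{@code
 * Net net = Dnn.readNetFromONNX("model.onnx");
 * net.setInput(Dnn.blobFromImage(bgrImage)); // bgrImage: an 8-bit BGR Mat
 * Mat out = net.forward();
 * }</pre>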
030 */
031public class Net {
032
033    protected final long nativeObj;
034    protected Net(long addr) { nativeObj = addr; }
035
036    public long getNativeObjAddr() { return nativeObj; }
037
038    // internal usage only
039    public static Net __fromPtr__(long addr) { return new Net(addr); }
040
041    //
042    // C++:   cv::dnn::Net::Net()
043    //
044
045    public Net() {
046        nativeObj = Net_0();
047    }
048
049
050    //
051    // C++: static Net cv::dnn::Net::readFromModelOptimizer(String xml, String bin)
052    //
053
054    /**
055     * Create a network from Intel's Model Optimizer intermediate representation (IR).
056     * @param xml XML configuration file with network's topology.
057     * @param bin Binary file with trained weights.
058     * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
059     * backend.
     * @return Net object.
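     *
     * For example (the IR file paths are placeholders):
     * <pre>{@code
     * Net net = Net.readFromModelOptimizer("model.xml", "model.bin");
     * }</pre>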
061     */
062    public static Net readFromModelOptimizer(String xml, String bin) {
063        return new Net(readFromModelOptimizer_0(xml, bin));
064    }
065
066
067    //
068    // C++: static Net cv::dnn::Net::readFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights)
069    //
070
071    /**
072     * Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
073     * @param bufferModelConfig buffer with model's configuration.
074     * @param bufferWeights buffer with model's trained weights.
075     * @return Net object.
076     */
077    public static Net readFromModelOptimizer(MatOfByte bufferModelConfig, MatOfByte bufferWeights) {
078        Mat bufferModelConfig_mat = bufferModelConfig;
079        Mat bufferWeights_mat = bufferWeights;
080        return new Net(readFromModelOptimizer_1(bufferModelConfig_mat.nativeObj, bufferWeights_mat.nativeObj));
081    }
082
083
084    //
085    // C++:  bool cv::dnn::Net::empty()
086    //
087
088    /**
089     * Returns true if there are no layers in the network.
     * @return true if the network has no layers; false otherwise.
091     */
092    public boolean empty() {
093        return empty_0(nativeObj);
094    }
095
096
097    //
098    // C++:  String cv::dnn::Net::dump()
099    //
100
101    /**
     * Dump the net to a String.
     * @return String with the structure, hyperparameters, backend, target and fusion.
     * Call this method after setInput(). To see the correct backend, target and fusion, call it after forward().
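     *
     * A sketch, assuming {@code net} and its input {@code blob} already exist:
     * <pre>{@code
     * net.setInput(blob);
     * System.out.println(net.dump()); // for backend/target/fusion info, dump after net.forward()
     * }</pre>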
105     */
106    public String dump() {
107        return dump_0(nativeObj);
108    }
109
110
111    //
112    // C++:  void cv::dnn::Net::dumpToFile(String path)
113    //
114
115    /**
     * Dump the net structure, hyperparameters, backend, target and fusion to a dot file.
     * @param path   path to the output file with a .dot extension
118     * SEE: dump()
119     */
120    public void dumpToFile(String path) {
121        dumpToFile_0(nativeObj, path);
122    }
123
124
125    //
126    // C++:  int cv::dnn::Net::getLayerId(String layer)
127    //
128
129    /**
     * Converts the string name of a layer to its integer identifier.
     * @return id of the layer, or -1 if the layer wasn't found.
     * @param layer string name of the layer.
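     *
     * For example, resolving every layer name to its id:
     * <pre>{@code
     * for (String name : net.getLayerNames()) {
     *     int id = net.getLayerId(name);
     * }
     * }</pre>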
133     */
134    public int getLayerId(String layer) {
135        return getLayerId_0(nativeObj, layer);
136    }
137
138
139    //
140    // C++:  vector_String cv::dnn::Net::getLayerNames()
141    //
142
143    public List<String> getLayerNames() {
144        return getLayerNames_0(nativeObj);
145    }
146
147
148    //
149    // C++:  Ptr_Layer cv::dnn::Net::getLayer(int layerId)
150    //
151
152    /**
     * Returns a pointer to the layer with the specified id which the network uses.
     * @param layerId id of the layer.
     * @return the Layer instance.
156     */
157    public Layer getLayer(int layerId) {
158        return Layer.__fromPtr__(getLayer_0(nativeObj, layerId));
159    }
160
161
162    //
163    // C++:  Ptr_Layer cv::dnn::Net::getLayer(String layerName)
164    //
165
166    /**
167     *
168     * @deprecated Use int getLayerId(const String &amp;layer)
     * @param layerName name of the layer.
     * @return the Layer instance.
171     */
172    @Deprecated
173    public Layer getLayer(String layerName) {
174        return Layer.__fromPtr__(getLayer_1(nativeObj, layerName));
175    }
176
177
178    //
179    // C++:  Ptr_Layer cv::dnn::Net::getLayer(LayerId layerId)
180    //
181
182    /**
183     *
184     * @deprecated to be removed
     * @param layerId id of the layer as a DictValue.
     * @return the Layer instance.
187     */
188    @Deprecated
189    public Layer getLayer(DictValue layerId) {
190        return Layer.__fromPtr__(getLayer_2(nativeObj, layerId.getNativeObjAddr()));
191    }
192
193
194    //
195    // C++:  void cv::dnn::Net::connect(String outPin, String inpPin)
196    //
197
198    /**
199     * Connects output of the first layer to input of the second layer.
200     * @param outPin descriptor of the first layer output.
201     * @param inpPin descriptor of the second layer input.
202     *
203     * Descriptors have the following template &lt;DFN&gt;&amp;lt;layer_name&amp;gt;[.input_number]&lt;/DFN&gt;:
     * - the first part of the template &lt;DFN&gt;layer_name&lt;/DFN&gt; is the string name of the added layer.
     * If this part is empty then the network input pseudo layer will be used;
     * - the second optional part of the template &lt;DFN&gt;input_number&lt;/DFN&gt;
     * is either the number of the layer input or its label.
208     * If this part is omitted then the first layer input will be used.
209     *
210     * SEE: setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
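     *
     * For example, to feed the first output of a layer named {@code "conv1"} into the first
     * input of a layer named {@code "relu1"} (layer names here are hypothetical):
     * <pre>{@code
     * net.connect("conv1", "relu1.0");
     * }</pre>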
211     */
212    public void connect(String outPin, String inpPin) {
213        connect_0(nativeObj, outPin, inpPin);
214    }
215
216
217    //
218    // C++:  void cv::dnn::Net::setInputsNames(vector_String inputBlobNames)
219    //
220
221    /**
     * Sets the output names of the network input pseudo layer.
     *
     * Each net always has its own special network input pseudo layer with id=0.
     * This layer stores the user blobs only and doesn't make any computations.
     * In fact, this layer provides the only way to pass user data into the network.
     * Like any other layer, this layer can label its outputs, and this function provides an easy way to do this.
     * @param inputBlobNames names to assign to the network input blobs.
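     *
     * For example, a net with one input blob named {@code "data"} (the name is illustrative):
     * <pre>{@code
     * net.setInputsNames(java.util.Arrays.asList("data"));
     * }</pre>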
229     */
230    public void setInputsNames(List<String> inputBlobNames) {
231        setInputsNames_0(nativeObj, inputBlobNames);
232    }
233
234
235    //
236    // C++:  void cv::dnn::Net::setInputShape(String inputName, MatShape shape)
237    //
238
239    /**
     * Specifies the shape of a network input.
     * @param inputName name of the network input.
     * @param shape shape of the input blob.
243     */
244    public void setInputShape(String inputName, MatOfInt shape) {
245        Mat shape_mat = shape;
246        setInputShape_0(nativeObj, inputName, shape_mat.nativeObj);
247    }
248
249
250    //
251    // C++:  Mat cv::dnn::Net::forward(String outputName = String())
252    //
253
254    /**
     * Runs a forward pass to compute the output of the layer with name {@code outputName}.
     * @param outputName name of the layer whose output is needed
     * @return blob for the first output of the specified layer.
258     * By default runs forward pass for the whole network.
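     *
     * A sketch, assuming the net has an output layer named {@code "prob"} (hypothetical) and an input already set:
     * <pre>{@code
     * Mat prob = net.forward("prob");
     * }</pre>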
259     */
260    public Mat forward(String outputName) {
261        return new Mat(forward_0(nativeObj, outputName));
262    }
263
264    /**
     * Runs a forward pass for the whole network.
     * @return blob for the first output of the network.
268     */
269    public Mat forward() {
270        return new Mat(forward_1(nativeObj));
271    }
272
273
274    //
275    // C++:  AsyncArray cv::dnn::Net::forwardAsync(String outputName = String())
276    //
277
278    // Return type 'AsyncArray' is not supported, skipping the function
279
280
281    //
282    // C++:  void cv::dnn::Net::forward(vector_Mat& outputBlobs, String outputName = String())
283    //
284
285    /**
     * Runs a forward pass to compute the output of the layer with name {@code outputName}.
     * @param outputBlobs contains all output blobs for the specified layer.
     * @param outputName name of the layer whose output is needed
     * If {@code outputName} is empty, runs a forward pass for the whole network.
290     */
291    public void forward(List<Mat> outputBlobs, String outputName) {
292        Mat outputBlobs_mat = new Mat();
293        forward_2(nativeObj, outputBlobs_mat.nativeObj, outputName);
294        Converters.Mat_to_vector_Mat(outputBlobs_mat, outputBlobs);
295        outputBlobs_mat.release();
296    }
297
298    /**
     * Runs a forward pass for the whole network.
     * @param outputBlobs receives all output blobs.
302     */
303    public void forward(List<Mat> outputBlobs) {
304        Mat outputBlobs_mat = new Mat();
305        forward_3(nativeObj, outputBlobs_mat.nativeObj);
306        Converters.Mat_to_vector_Mat(outputBlobs_mat, outputBlobs);
307        outputBlobs_mat.release();
308    }
309
310
311    //
312    // C++:  void cv::dnn::Net::forward(vector_Mat& outputBlobs, vector_String outBlobNames)
313    //
314
315    /**
     * Runs a forward pass to compute the outputs of the layers listed in {@code outBlobNames}.
     * @param outputBlobs contains blobs for the first outputs of the specified layers.
     * @param outBlobNames names of the layers whose outputs are needed
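     *
     * A common pattern is to fetch every unconnected output in one pass:
     * <pre>{@code
     * List<Mat> outs = new ArrayList<>();
     * net.forward(outs, net.getUnconnectedOutLayersNames());
     * }</pre>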
319     */
320    public void forward(List<Mat> outputBlobs, List<String> outBlobNames) {
321        Mat outputBlobs_mat = new Mat();
322        forward_4(nativeObj, outputBlobs_mat.nativeObj, outBlobNames);
323        Converters.Mat_to_vector_Mat(outputBlobs_mat, outputBlobs);
324        outputBlobs_mat.release();
325    }
326
327
328    //
329    // C++:  void cv::dnn::Net::forward(vector_vector_Mat& outputBlobs, vector_String outBlobNames)
330    //
331
332    // Unknown type 'vector_vector_Mat' (O), skipping the function
333
334
335    //
336    // C++:  Net cv::dnn::Net::quantize(vector_Mat calibData, int inputsDtype, int outputsDtype, bool perChannel = true)
337    //
338
339    /**
340     * Returns a quantized Net from a floating-point Net.
341     * @param calibData Calibration data to compute the quantization parameters.
342     * @param inputsDtype Datatype of quantized net's inputs. Can be CV_32F or CV_8S.
343     * @param outputsDtype Datatype of quantized net's outputs. Can be CV_32F or CV_8S.
     * @param perChannel Quantization granularity of the quantized Net. The default is true, which means the model
     * is quantized in a per-channel (channel-wise) way. Set it to false to quantize the model in a per-tensor (tensor-wise) way.
     * @return quantized Net.
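     *
     * A sketch, where {@code calibBlob} is a representative input blob (hypothetical):
     * <pre>{@code
     * Net quantized = net.quantize(java.util.Arrays.asList(calibBlob), CvType.CV_32F, CvType.CV_32F, true);
     * }</pre>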
347     */
348    public Net quantize(List<Mat> calibData, int inputsDtype, int outputsDtype, boolean perChannel) {
349        Mat calibData_mat = Converters.vector_Mat_to_Mat(calibData);
350        return new Net(quantize_0(nativeObj, calibData_mat.nativeObj, inputsDtype, outputsDtype, perChannel));
351    }
352
353    /**
     * Returns a quantized Net from a floating-point Net.
     * @param calibData Calibration data to compute the quantization parameters.
     * @param inputsDtype Datatype of quantized net's inputs. Can be CV_32F or CV_8S.
     * @param outputsDtype Datatype of quantized net's outputs. Can be CV_32F or CV_8S.
     * {@code perChannel} defaults to true, i.e. the model is quantized in a per-channel (channel-wise) way.
     * @return quantized Net.
360     */
361    public Net quantize(List<Mat> calibData, int inputsDtype, int outputsDtype) {
362        Mat calibData_mat = Converters.vector_Mat_to_Mat(calibData);
363        return new Net(quantize_1(nativeObj, calibData_mat.nativeObj, inputsDtype, outputsDtype));
364    }
365
366
367    //
368    // C++:  void cv::dnn::Net::getInputDetails(vector_float& scales, vector_int& zeropoints)
369    //
370
371    /**
372     * Returns input scale and zeropoint for a quantized Net.
373     * @param scales output parameter for returning input scales.
374     * @param zeropoints output parameter for returning input zeropoints.
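     *
     * For example, on a net produced by quantize() (here {@code quantizedNet}, hypothetical):
     * <pre>{@code
     * MatOfFloat scales = new MatOfFloat();
     * MatOfInt zeropoints = new MatOfInt();
     * quantizedNet.getInputDetails(scales, zeropoints);
     * }</pre>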
375     */
376    public void getInputDetails(MatOfFloat scales, MatOfInt zeropoints) {
377        Mat scales_mat = scales;
378        Mat zeropoints_mat = zeropoints;
379        getInputDetails_0(nativeObj, scales_mat.nativeObj, zeropoints_mat.nativeObj);
380    }
381
382
383    //
384    // C++:  void cv::dnn::Net::getOutputDetails(vector_float& scales, vector_int& zeropoints)
385    //
386
387    /**
388     * Returns output scale and zeropoint for a quantized Net.
389     * @param scales output parameter for returning output scales.
390     * @param zeropoints output parameter for returning output zeropoints.
391     */
392    public void getOutputDetails(MatOfFloat scales, MatOfInt zeropoints) {
393        Mat scales_mat = scales;
394        Mat zeropoints_mat = zeropoints;
395        getOutputDetails_0(nativeObj, scales_mat.nativeObj, zeropoints_mat.nativeObj);
396    }
397
398
399    //
400    // C++:  void cv::dnn::Net::setHalideScheduler(String scheduler)
401    //
402
403    /**
404     * Compile Halide layers.
405     * @param scheduler Path to YAML file with scheduling directives.
406     * SEE: setPreferableBackend
407     *
     * Schedules layers that support the Halide backend, then compiles them for a
     * specific target. For layers that are not represented in the scheduling file,
     * or if no manual scheduling is used at all, automatic scheduling will be applied.
411     */
412    public void setHalideScheduler(String scheduler) {
413        setHalideScheduler_0(nativeObj, scheduler);
414    }
415
416
417    //
418    // C++:  void cv::dnn::Net::setPreferableBackend(int backendId)
419    //
420
421    /**
     * Asks the network to use a specific computation backend where it is supported.
423     * @param backendId backend identifier.
424     * SEE: Backend
425     *
426     * If OpenCV is compiled with Intel's Inference Engine library, DNN_BACKEND_DEFAULT
427     * means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it equals to DNN_BACKEND_OPENCV.
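     *
     * For example, to request the CUDA backend together with a CUDA target
     * (requires an OpenCV build with CUDA support):
     * <pre>{@code
     * net.setPreferableBackend(Dnn.DNN_BACKEND_CUDA);
     * net.setPreferableTarget(Dnn.DNN_TARGET_CUDA);
     * }</pre>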
428     */
429    public void setPreferableBackend(int backendId) {
430        setPreferableBackend_0(nativeObj, backendId);
431    }
432
433
434    //
435    // C++:  void cv::dnn::Net::setPreferableTarget(int targetId)
436    //
437
438    /**
     * Asks the network to run computations on a specific target device.
440     * @param targetId target identifier.
441     * SEE: Target
442     *
443     * List of supported combinations backend / target:
444     * |                        | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE |  DNN_BACKEND_CUDA |
445     * |------------------------|--------------------|------------------------------|--------------------|-------------------|
446     * | DNN_TARGET_CPU         |                  + |                            + |                  + |                   |
447     * | DNN_TARGET_OPENCL      |                  + |                            + |                  + |                   |
448     * | DNN_TARGET_OPENCL_FP16 |                  + |                            + |                    |                   |
449     * | DNN_TARGET_MYRIAD      |                    |                            + |                    |                   |
450     * | DNN_TARGET_FPGA        |                    |                            + |                    |                   |
451     * | DNN_TARGET_CUDA        |                    |                              |                    |                 + |
452     * | DNN_TARGET_CUDA_FP16   |                    |                              |                    |                 + |
453     * | DNN_TARGET_HDDL        |                    |                            + |                    |                   |
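     *
     * For example, with the default OpenCV backend an OpenCL target can be requested:
     * <pre>{@code
     * net.setPreferableTarget(Dnn.DNN_TARGET_OPENCL);
     * }</pre>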
454     */
455    public void setPreferableTarget(int targetId) {
456        setPreferableTarget_0(nativeObj, targetId);
457    }
458
459
460    //
461    // C++:  void cv::dnn::Net::setInput(Mat blob, String name = "", double scalefactor = 1.0, Scalar mean = Scalar())
462    //
463
464    /**
     * Sets the new input value for the network.
     * @param blob        A new blob. Should have CV_32F or CV_8U depth.
     * @param name        A name of the input layer.
     * @param scalefactor An optional normalization scale.
     * @param mean        Optional mean-subtraction values.
     * SEE: connect(String, String) for the format of the descriptor.
471     *
472     * If scale or mean values are specified, a final input blob is computed
473     * as:
474     * \(input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\)
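     *
     * A sketch for a 224x224 model with mean subtraction (input name and mean values are placeholders):
     * <pre>{@code
     * Mat blob = Dnn.blobFromImage(img, 1.0, new Size(224, 224), Scalar.all(0), false, false);
     * net.setInput(blob, "data", 1.0 / 255, new Scalar(104, 117, 123, 0));
     * }</pre>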
475     */
476    public void setInput(Mat blob, String name, double scalefactor, Scalar mean) {
477        setInput_0(nativeObj, blob.nativeObj, name, scalefactor, mean.val[0], mean.val[1], mean.val[2], mean.val[3]);
478    }
479
480    /**
     * Sets the new input value for the network.
     * @param blob        A new blob. Should have CV_32F or CV_8U depth.
     * @param name        A name of the input layer.
     * @param scalefactor An optional normalization scale.
     * SEE: connect(String, String) for the format of the descriptor.
486     *
487     * If scale or mean values are specified, a final input blob is computed
488     * as:
489     * \(input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\)
490     */
491    public void setInput(Mat blob, String name, double scalefactor) {
492        setInput_1(nativeObj, blob.nativeObj, name, scalefactor);
493    }
494
495    /**
     * Sets the new input value for the network.
     * @param blob        A new blob. Should have CV_32F or CV_8U depth.
     * @param name        A name of the input layer.
     * SEE: connect(String, String) for the format of the descriptor.
500     *
501     * If scale or mean values are specified, a final input blob is computed
502     * as:
503     * \(input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\)
504     */
505    public void setInput(Mat blob, String name) {
506        setInput_2(nativeObj, blob.nativeObj, name);
507    }
508
509    /**
     * Sets the new input value for the network.
     * @param blob        A new blob. Should have CV_32F or CV_8U depth.
     * SEE: connect(String, String) for the format of the descriptor.
513     *
514     * If scale or mean values are specified, a final input blob is computed
515     * as:
516     * \(input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\)
517     */
518    public void setInput(Mat blob) {
519        setInput_3(nativeObj, blob.nativeObj);
520    }
521
522
523    //
524    // C++:  void cv::dnn::Net::setParam(int layer, int numParam, Mat blob)
525    //
526
527    /**
     * Sets the new value for the learned parameters of the layer.
     * @param layer id of the layer.
530     * @param numParam index of the layer parameter in the Layer::blobs array.
531     * @param blob the new value.
532     * SEE: Layer::blobs
533     * <b>Note:</b> If shape of the new blob differs from the previous shape,
534     * then the following forward pass may fail.
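     *
     * A sketch that halves the first weight blob of layer 0 (indices are illustrative):
     * <pre>{@code
     * Mat w = net.getParam(0);
     * Core.multiply(w, Scalar.all(0.5), w);
     * net.setParam(0, 0, w);
     * }</pre>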
535     */
536    public void setParam(int layer, int numParam, Mat blob) {
537        setParam_0(nativeObj, layer, numParam, blob.nativeObj);
538    }
539
540
541    //
542    // C++:  void cv::dnn::Net::setParam(String layerName, int numParam, Mat blob)
543    //
544
545    public void setParam(String layerName, int numParam, Mat blob) {
546        setParam_1(nativeObj, layerName, numParam, blob.nativeObj);
547    }
548
549
550    //
551    // C++:  Mat cv::dnn::Net::getParam(int layer, int numParam = 0)
552    //
553
554    /**
     * Returns the parameter blob of the layer.
     * @param layer id of the layer.
     * @param numParam index of the layer parameter in the Layer::blobs array.
     * SEE: Layer::blobs
     * @return parameter blob.
560     */
561    public Mat getParam(int layer, int numParam) {
562        return new Mat(getParam_0(nativeObj, layer, numParam));
563    }
564
565    /**
     * Returns the parameter blob of the layer.
     * @param layer id of the layer.
     * SEE: Layer::blobs
     * @return parameter blob.
570     */
571    public Mat getParam(int layer) {
572        return new Mat(getParam_1(nativeObj, layer));
573    }
574
575
576    //
577    // C++:  Mat cv::dnn::Net::getParam(String layerName, int numParam = 0)
578    //
579
580    public Mat getParam(String layerName, int numParam) {
581        return new Mat(getParam_2(nativeObj, layerName, numParam));
582    }
583
584    public Mat getParam(String layerName) {
585        return new Mat(getParam_3(nativeObj, layerName));
586    }
587
588
589    //
590    // C++:  vector_int cv::dnn::Net::getUnconnectedOutLayers()
591    //
592
593    /**
594     * Returns indexes of layers with unconnected outputs.
595     *
596     * FIXIT: Rework API to registerOutput() approach, deprecate this call
     * @return vector of ids of layers with unconnected outputs.
598     */
599    public MatOfInt getUnconnectedOutLayers() {
600        return MatOfInt.fromNativeAddr(getUnconnectedOutLayers_0(nativeObj));
601    }
602
603
604    //
605    // C++:  vector_String cv::dnn::Net::getUnconnectedOutLayersNames()
606    //
607
608    /**
609     * Returns names of layers with unconnected outputs.
610     *
611     * FIXIT: Rework API to registerOutput() approach, deprecate this call
     * @return list of names of layers with unconnected outputs.
613     */
614    public List<String> getUnconnectedOutLayersNames() {
615        return getUnconnectedOutLayersNames_0(nativeObj);
616    }
617
618
619    //
620    // C++:  void cv::dnn::Net::getLayersShapes(vector_MatShape netInputShapes, vector_int& layersIds, vector_vector_MatShape& inLayersShapes, vector_vector_MatShape& outLayersShapes)
621    //
622
623    // Unknown type 'vector_vector_MatShape' (O), skipping the function
624
625
626    //
627    // C++:  void cv::dnn::Net::getLayersShapes(MatShape netInputShape, vector_int& layersIds, vector_vector_MatShape& inLayersShapes, vector_vector_MatShape& outLayersShapes)
628    //
629
630    // Unknown type 'vector_vector_MatShape' (O), skipping the function
631
632
633    //
634    // C++:  int64 cv::dnn::Net::getFLOPS(vector_MatShape netInputShapes)
635    //
636
637    /**
     * Computes FLOPs for the whole loaded model with the specified input shapes.
     * @param netInputShapes vector of shapes for all net inputs.
     * @return computed FLOPs.
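     *
     * For example, for a single NCHW input of 1x3x224x224 (the shape is a placeholder):
     * <pre>{@code
     * long flops = net.getFLOPS(java.util.Arrays.asList(new MatOfInt(1, 3, 224, 224)));
     * }</pre>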
641     */
642    public long getFLOPS(List<MatOfInt> netInputShapes) {
643        return getFLOPS_0(nativeObj, netInputShapes);
644    }
645
646
647    //
648    // C++:  int64 cv::dnn::Net::getFLOPS(MatShape netInputShape)
649    //
650
651    public long getFLOPS(MatOfInt netInputShape) {
652        Mat netInputShape_mat = netInputShape;
653        return getFLOPS_1(nativeObj, netInputShape_mat.nativeObj);
654    }
655
656
657    //
658    // C++:  int64 cv::dnn::Net::getFLOPS(int layerId, vector_MatShape netInputShapes)
659    //
660
661    public long getFLOPS(int layerId, List<MatOfInt> netInputShapes) {
662        return getFLOPS_2(nativeObj, layerId, netInputShapes);
663    }
664
665
666    //
667    // C++:  int64 cv::dnn::Net::getFLOPS(int layerId, MatShape netInputShape)
668    //
669
670    public long getFLOPS(int layerId, MatOfInt netInputShape) {
671        Mat netInputShape_mat = netInputShape;
672        return getFLOPS_3(nativeObj, layerId, netInputShape_mat.nativeObj);
673    }
674
675
676    //
677    // C++:  void cv::dnn::Net::getLayerTypes(vector_String& layersTypes)
678    //
679
680    /**
     * Returns the list of types for layers used in the model.
     * @param layersTypes output parameter for returning types.
683     */
684    public void getLayerTypes(List<String> layersTypes) {
685        getLayerTypes_0(nativeObj, layersTypes);
686    }
687
688
689    //
690    // C++:  int cv::dnn::Net::getLayersCount(String layerType)
691    //
692
693    /**
     * Returns the count of layers of the specified type.
     * @param layerType layer type.
     * @return count of layers of the specified type.
697     */
698    public int getLayersCount(String layerType) {
699        return getLayersCount_0(nativeObj, layerType);
700    }
701
702
703    //
704    // C++:  void cv::dnn::Net::getMemoryConsumption(MatShape netInputShape, size_t& weights, size_t& blobs)
705    //
706
707    public void getMemoryConsumption(MatOfInt netInputShape, long[] weights, long[] blobs) {
708        Mat netInputShape_mat = netInputShape;
709        double[] weights_out = new double[1];
710        double[] blobs_out = new double[1];
711        getMemoryConsumption_0(nativeObj, netInputShape_mat.nativeObj, weights_out, blobs_out);
712        if(weights!=null) weights[0] = (long)weights_out[0];
713        if(blobs!=null) blobs[0] = (long)blobs_out[0];
714    }
715
716
717    //
718    // C++:  void cv::dnn::Net::getMemoryConsumption(int layerId, vector_MatShape netInputShapes, size_t& weights, size_t& blobs)
719    //
720
721    public void getMemoryConsumption(int layerId, List<MatOfInt> netInputShapes, long[] weights, long[] blobs) {
722        double[] weights_out = new double[1];
723        double[] blobs_out = new double[1];
724        getMemoryConsumption_1(nativeObj, layerId, netInputShapes, weights_out, blobs_out);
725        if(weights!=null) weights[0] = (long)weights_out[0];
726        if(blobs!=null) blobs[0] = (long)blobs_out[0];
727    }
728
729
730    //
731    // C++:  void cv::dnn::Net::getMemoryConsumption(int layerId, MatShape netInputShape, size_t& weights, size_t& blobs)
732    //
733
734    public void getMemoryConsumption(int layerId, MatOfInt netInputShape, long[] weights, long[] blobs) {
735        Mat netInputShape_mat = netInputShape;
736        double[] weights_out = new double[1];
737        double[] blobs_out = new double[1];
738        getMemoryConsumption_2(nativeObj, layerId, netInputShape_mat.nativeObj, weights_out, blobs_out);
739        if(weights!=null) weights[0] = (long)weights_out[0];
740        if(blobs!=null) blobs[0] = (long)blobs_out[0];
741    }
742
743
744    //
745    // C++:  void cv::dnn::Net::enableFusion(bool fusion)
746    //
747
748    /**
749     * Enables or disables layer fusion in the network.
750     * @param fusion true to enable the fusion, false to disable. The fusion is enabled by default.
751     */
752    public void enableFusion(boolean fusion) {
753        enableFusion_0(nativeObj, fusion);
754    }
755
756
757    //
758    // C++:  void cv::dnn::Net::enableWinograd(bool useWinograd)
759    //
760
761    /**
762     * Enables or disables the Winograd compute branch. The Winograd compute branch can speed up
763     * 3x3 Convolution at a small loss of accuracy.
764     * @param useWinograd true to enable the Winograd compute branch. The default is true.
765     */
766    public void enableWinograd(boolean useWinograd) {
767        enableWinograd_0(nativeObj, useWinograd);
768    }
769
770
771    //
772    // C++:  int64 cv::dnn::Net::getPerfProfile(vector_double& timings)
773    //
774
775    /**
776     * Returns overall time for inference and timings (in ticks) for layers.
777     *
     * Indexes in the returned vector correspond to layer ids. Some layers can be fused with others,
     * in which case a zero tick count will be returned for those skipped layers. Supported by DNN_BACKEND_OPENCV on DNN_TARGET_CPU only.
780     *
781     * @param timings vector for tick timings for all layers.
782     * @return overall ticks for model inference.
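     *
     * A sketch, converting ticks to milliseconds after a forward pass:
     * <pre>{@code
     * MatOfDouble timings = new MatOfDouble();
     * long totalTicks = net.getPerfProfile(timings);
     * double ms = totalTicks * 1000.0 / Core.getTickFrequency();
     * }</pre>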
783     */
784    public long getPerfProfile(MatOfDouble timings) {
785        Mat timings_mat = timings;
786        return getPerfProfile_0(nativeObj, timings_mat.nativeObj);
787    }
788
789
790    @Override
791    protected void finalize() throws Throwable {
792        delete(nativeObj);
793    }
794
795
796
797    // C++:   cv::dnn::Net::Net()
798    private static native long Net_0();
799
800    // C++: static Net cv::dnn::Net::readFromModelOptimizer(String xml, String bin)
801    private static native long readFromModelOptimizer_0(String xml, String bin);
802
803    // C++: static Net cv::dnn::Net::readFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights)
804    private static native long readFromModelOptimizer_1(long bufferModelConfig_mat_nativeObj, long bufferWeights_mat_nativeObj);
805
806    // C++:  bool cv::dnn::Net::empty()
807    private static native boolean empty_0(long nativeObj);
808
809    // C++:  String cv::dnn::Net::dump()
810    private static native String dump_0(long nativeObj);
811
812    // C++:  void cv::dnn::Net::dumpToFile(String path)
813    private static native void dumpToFile_0(long nativeObj, String path);
814
815    // C++:  int cv::dnn::Net::getLayerId(String layer)
816    private static native int getLayerId_0(long nativeObj, String layer);
817
818    // C++:  vector_String cv::dnn::Net::getLayerNames()
819    private static native List<String> getLayerNames_0(long nativeObj);
820
821    // C++:  Ptr_Layer cv::dnn::Net::getLayer(int layerId)
822    private static native long getLayer_0(long nativeObj, int layerId);
823
824    // C++:  Ptr_Layer cv::dnn::Net::getLayer(String layerName)
825    private static native long getLayer_1(long nativeObj, String layerName);
826
827    // C++:  Ptr_Layer cv::dnn::Net::getLayer(LayerId layerId)
828    private static native long getLayer_2(long nativeObj, long layerId_nativeObj);
829
830    // C++:  void cv::dnn::Net::connect(String outPin, String inpPin)
831    private static native void connect_0(long nativeObj, String outPin, String inpPin);
832
833    // C++:  void cv::dnn::Net::setInputsNames(vector_String inputBlobNames)
834    private static native void setInputsNames_0(long nativeObj, List<String> inputBlobNames);
835
836    // C++:  void cv::dnn::Net::setInputShape(String inputName, MatShape shape)
837    private static native void setInputShape_0(long nativeObj, String inputName, long shape_mat_nativeObj);
838
839    // C++:  Mat cv::dnn::Net::forward(String outputName = String())
840    private static native long forward_0(long nativeObj, String outputName);
841    private static native long forward_1(long nativeObj);
842
843    // C++:  void cv::dnn::Net::forward(vector_Mat& outputBlobs, String outputName = String())
844    private static native void forward_2(long nativeObj, long outputBlobs_mat_nativeObj, String outputName);
845    private static native void forward_3(long nativeObj, long outputBlobs_mat_nativeObj);
846
847    // C++:  void cv::dnn::Net::forward(vector_Mat& outputBlobs, vector_String outBlobNames)
848    private static native void forward_4(long nativeObj, long outputBlobs_mat_nativeObj, List<String> outBlobNames);
849
850    // C++:  Net cv::dnn::Net::quantize(vector_Mat calibData, int inputsDtype, int outputsDtype, bool perChannel = true)
851    private static native long quantize_0(long nativeObj, long calibData_mat_nativeObj, int inputsDtype, int outputsDtype, boolean perChannel);
852    private static native long quantize_1(long nativeObj, long calibData_mat_nativeObj, int inputsDtype, int outputsDtype);
853
854    // C++:  void cv::dnn::Net::getInputDetails(vector_float& scales, vector_int& zeropoints)
855    private static native void getInputDetails_0(long nativeObj, long scales_mat_nativeObj, long zeropoints_mat_nativeObj);
856
857    // C++:  void cv::dnn::Net::getOutputDetails(vector_float& scales, vector_int& zeropoints)
858    private static native void getOutputDetails_0(long nativeObj, long scales_mat_nativeObj, long zeropoints_mat_nativeObj);
859
860    // C++:  void cv::dnn::Net::setHalideScheduler(String scheduler)
861    private static native void setHalideScheduler_0(long nativeObj, String scheduler);
862
863    // C++:  void cv::dnn::Net::setPreferableBackend(int backendId)
864    private static native void setPreferableBackend_0(long nativeObj, int backendId);
865
866    // C++:  void cv::dnn::Net::setPreferableTarget(int targetId)
867    private static native void setPreferableTarget_0(long nativeObj, int targetId);
868
869    // C++:  void cv::dnn::Net::setInput(Mat blob, String name = "", double scalefactor = 1.0, Scalar mean = Scalar())
870    private static native void setInput_0(long nativeObj, long blob_nativeObj, String name, double scalefactor, double mean_val0, double mean_val1, double mean_val2, double mean_val3);
871    private static native void setInput_1(long nativeObj, long blob_nativeObj, String name, double scalefactor);
872    private static native void setInput_2(long nativeObj, long blob_nativeObj, String name);
873    private static native void setInput_3(long nativeObj, long blob_nativeObj);
874
875    // C++:  void cv::dnn::Net::setParam(int layer, int numParam, Mat blob)
876    private static native void setParam_0(long nativeObj, int layer, int numParam, long blob_nativeObj);
877
878    // C++:  void cv::dnn::Net::setParam(String layerName, int numParam, Mat blob)
879    private static native void setParam_1(long nativeObj, String layerName, int numParam, long blob_nativeObj);
880
881    // C++:  Mat cv::dnn::Net::getParam(int layer, int numParam = 0)
882    private static native long getParam_0(long nativeObj, int layer, int numParam);
883    private static native long getParam_1(long nativeObj, int layer);
884
885    // C++:  Mat cv::dnn::Net::getParam(String layerName, int numParam = 0)
886    private static native long getParam_2(long nativeObj, String layerName, int numParam);
887    private static native long getParam_3(long nativeObj, String layerName);
888
889    // C++:  vector_int cv::dnn::Net::getUnconnectedOutLayers()
890    private static native long getUnconnectedOutLayers_0(long nativeObj);
891
892    // C++:  vector_String cv::dnn::Net::getUnconnectedOutLayersNames()
893    private static native List<String> getUnconnectedOutLayersNames_0(long nativeObj);
894
895    // C++:  int64 cv::dnn::Net::getFLOPS(vector_MatShape netInputShapes)
896    private static native long getFLOPS_0(long nativeObj, List<MatOfInt> netInputShapes);
897
898    // C++:  int64 cv::dnn::Net::getFLOPS(MatShape netInputShape)
899    private static native long getFLOPS_1(long nativeObj, long netInputShape_mat_nativeObj);
900
901    // C++:  int64 cv::dnn::Net::getFLOPS(int layerId, vector_MatShape netInputShapes)
902    private static native long getFLOPS_2(long nativeObj, int layerId, List<MatOfInt> netInputShapes);
903
904    // C++:  int64 cv::dnn::Net::getFLOPS(int layerId, MatShape netInputShape)
905    private static native long getFLOPS_3(long nativeObj, int layerId, long netInputShape_mat_nativeObj);
906
907    // C++:  void cv::dnn::Net::getLayerTypes(vector_String& layersTypes)
908    private static native void getLayerTypes_0(long nativeObj, List<String> layersTypes);
909
910    // C++:  int cv::dnn::Net::getLayersCount(String layerType)
911    private static native int getLayersCount_0(long nativeObj, String layerType);
912
913    // C++:  void cv::dnn::Net::getMemoryConsumption(MatShape netInputShape, size_t& weights, size_t& blobs)
914    private static native void getMemoryConsumption_0(long nativeObj, long netInputShape_mat_nativeObj, double[] weights_out, double[] blobs_out);
915
916    // C++:  void cv::dnn::Net::getMemoryConsumption(int layerId, vector_MatShape netInputShapes, size_t& weights, size_t& blobs)
917    private static native void getMemoryConsumption_1(long nativeObj, int layerId, List<MatOfInt> netInputShapes, double[] weights_out, double[] blobs_out);
918
919    // C++:  void cv::dnn::Net::getMemoryConsumption(int layerId, MatShape netInputShape, size_t& weights, size_t& blobs)
920    private static native void getMemoryConsumption_2(long nativeObj, int layerId, long netInputShape_mat_nativeObj, double[] weights_out, double[] blobs_out);
921
922    // C++:  void cv::dnn::Net::enableFusion(bool fusion)
923    private static native void enableFusion_0(long nativeObj, boolean fusion);
924
925    // C++:  void cv::dnn::Net::enableWinograd(bool useWinograd)
926    private static native void enableWinograd_0(long nativeObj, boolean useWinograd);
927
928    // C++:  int64 cv::dnn::Net::getPerfProfile(vector_double& timings)
929    private static native long getPerfProfile_0(long nativeObj, long timings_mat_nativeObj);
930
931    // native support for java finalize()
932    private static native void delete(long nativeObj);
933
934}