001// 002// This file is auto-generated. Please don't modify it! 003// 004package org.opencv.dnn; 005 006import java.util.ArrayList; 007import java.util.List; 008import org.opencv.core.Mat; 009import org.opencv.core.MatOfByte; 010import org.opencv.core.MatOfFloat; 011import org.opencv.core.MatOfInt; 012import org.opencv.core.MatOfRect; 013import org.opencv.core.MatOfRect2d; 014import org.opencv.core.MatOfRotatedRect; 015import org.opencv.core.Scalar; 016import org.opencv.core.Size; 017import org.opencv.dnn.Image2BlobParams; 018import org.opencv.dnn.Net; 019import org.opencv.utils.Converters; 020 021// C++: class Dnn 022 023public class Dnn { 024 025 // C++: enum Backend (cv.dnn.Backend) 026 public static final int 027 DNN_BACKEND_DEFAULT = 0, 028 DNN_BACKEND_HALIDE = 0+1, 029 DNN_BACKEND_INFERENCE_ENGINE = 0+2, 030 DNN_BACKEND_OPENCV = 0+3, 031 DNN_BACKEND_VKCOM = 0+4, 032 DNN_BACKEND_CUDA = 0+5, 033 DNN_BACKEND_WEBNN = 0+6, 034 DNN_BACKEND_TIMVX = 0+7, 035 DNN_BACKEND_CANN = 0+8; 036 037 038 // C++: enum DataLayout (cv.dnn.DataLayout) 039 public static final int 040 DNN_LAYOUT_UNKNOWN = 0, 041 DNN_LAYOUT_ND = 1, 042 DNN_LAYOUT_NCHW = 2, 043 DNN_LAYOUT_NCDHW = 3, 044 DNN_LAYOUT_NHWC = 4, 045 DNN_LAYOUT_NDHWC = 5, 046 DNN_LAYOUT_PLANAR = 6; 047 048 049 // C++: enum ImagePaddingMode (cv.dnn.ImagePaddingMode) 050 public static final int 051 DNN_PMODE_NULL = 0, 052 DNN_PMODE_CROP_CENTER = 1, 053 DNN_PMODE_LETTERBOX = 2; 054 055 056 // C++: enum SoftNMSMethod (cv.dnn.SoftNMSMethod) 057 public static final int 058 SoftNMSMethod_SOFTNMS_LINEAR = 1, 059 SoftNMSMethod_SOFTNMS_GAUSSIAN = 2; 060 061 062 // C++: enum Target (cv.dnn.Target) 063 public static final int 064 DNN_TARGET_CPU = 0, 065 DNN_TARGET_OPENCL = 0+1, 066 DNN_TARGET_OPENCL_FP16 = 0+2, 067 DNN_TARGET_MYRIAD = 0+3, 068 DNN_TARGET_VULKAN = 0+4, 069 DNN_TARGET_FPGA = 0+5, 070 DNN_TARGET_CUDA = 0+6, 071 DNN_TARGET_CUDA_FP16 = 0+7, 072 DNN_TARGET_HDDL = 0+8, 073 DNN_TARGET_NPU = 0+9, 074 DNN_TARGET_CPU_FP16 = 0+10; 075 076 077 // 078 // C++: vector_Target cv::dnn::getAvailableTargets(dnn_Backend be) 079 // 080 081 public static List<Integer> getAvailableTargets(int be) { 082 return getAvailableTargets_0(be); 083 } 084 085 086 // 087 // C++: Net cv::dnn::readNetFromDarknet(String cfgFile, String darknetModel = String()) 088 // 089 090 /** 091 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files. 092 * @param cfgFile path to the .cfg file with text description of the network architecture. 093 * @param darknetModel path to the .weights file with learned network. 094 * @return Network object that ready to do forward, throw an exception in failure cases. 095 */ 096 public static Net readNetFromDarknet(String cfgFile, String darknetModel) { 097 return new Net(readNetFromDarknet_0(cfgFile, darknetModel)); 098 } 099 100 /** 101 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files. 102 * @param cfgFile path to the .cfg file with text description of the network architecture. 103 * @return Network object that ready to do forward, throw an exception in failure cases. 104 */ 105 public static Net readNetFromDarknet(String cfgFile) { 106 return new Net(readNetFromDarknet_1(cfgFile)); 107 } 108 109 110 // 111 // C++: Net cv::dnn::readNetFromDarknet(vector_uchar bufferCfg, vector_uchar bufferModel = std::vector<uchar>()) 112 // 113 114 /** 115 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files. 116 * @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture. 117 * @param bufferModel A buffer contains a content of .weights file with learned network. 118 * @return Net object. 119 */ 120 public static Net readNetFromDarknet(MatOfByte bufferCfg, MatOfByte bufferModel) { 121 Mat bufferCfg_mat = bufferCfg; 122 Mat bufferModel_mat = bufferModel; 123 return new Net(readNetFromDarknet_2(bufferCfg_mat.nativeObj, bufferModel_mat.nativeObj)); 124 } 125 126 /** 127 * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files. 128 * @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture. 129 * @return Net object. 130 */ 131 public static Net readNetFromDarknet(MatOfByte bufferCfg) { 132 Mat bufferCfg_mat = bufferCfg; 133 return new Net(readNetFromDarknet_3(bufferCfg_mat.nativeObj)); 134 } 135 136 137 // 138 // C++: Net cv::dnn::readNetFromCaffe(String prototxt, String caffeModel = String()) 139 // 140 141 /** 142 * Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format. 143 * @param prototxt path to the .prototxt file with text description of the network architecture. 144 * @param caffeModel path to the .caffemodel file with learned network. 145 * @return Net object. 146 */ 147 public static Net readNetFromCaffe(String prototxt, String caffeModel) { 148 return new Net(readNetFromCaffe_0(prototxt, caffeModel)); 149 } 150 151 /** 152 * Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format. 153 * @param prototxt path to the .prototxt file with text description of the network architecture. 154 * @return Net object. 155 */ 156 public static Net readNetFromCaffe(String prototxt) { 157 return new Net(readNetFromCaffe_1(prototxt)); 158 } 159 160 161 // 162 // C++: Net cv::dnn::readNetFromCaffe(vector_uchar bufferProto, vector_uchar bufferModel = std::vector<uchar>()) 163 // 164 165 /** 166 * Reads a network model stored in Caffe model in memory. 167 * @param bufferProto buffer containing the content of the .prototxt file 168 * @param bufferModel buffer containing the content of the .caffemodel file 169 * @return Net object. 170 */ 171 public static Net readNetFromCaffe(MatOfByte bufferProto, MatOfByte bufferModel) { 172 Mat bufferProto_mat = bufferProto; 173 Mat bufferModel_mat = bufferModel; 174 return new Net(readNetFromCaffe_2(bufferProto_mat.nativeObj, bufferModel_mat.nativeObj)); 175 } 176 177 /** 178 * Reads a network model stored in Caffe model in memory. 179 * @param bufferProto buffer containing the content of the .prototxt file 180 * @return Net object. 181 */ 182 public static Net readNetFromCaffe(MatOfByte bufferProto) { 183 Mat bufferProto_mat = bufferProto; 184 return new Net(readNetFromCaffe_3(bufferProto_mat.nativeObj)); 185 } 186 187 188 // 189 // C++: Net cv::dnn::readNetFromTensorflow(String model, String config = String()) 190 // 191 192 /** 193 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format. 194 * @param model path to the .pb file with binary protobuf description of the network architecture 195 * @param config path to the .pbtxt file that contains text graph definition in protobuf format. 196 * Resulting Net object is built by text graph using weights from a binary one that 197 * let us make it more flexible. 198 * @return Net object. 199 */ 200 public static Net readNetFromTensorflow(String model, String config) { 201 return new Net(readNetFromTensorflow_0(model, config)); 202 } 203 204 /** 205 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format. 206 * @param model path to the .pb file with binary protobuf description of the network architecture 207 * Resulting Net object is built by text graph using weights from a binary one that 208 * let us make it more flexible. 209 * @return Net object. 210 */ 211 public static Net readNetFromTensorflow(String model) { 212 return new Net(readNetFromTensorflow_1(model)); 213 } 214 215 216 // 217 // C++: Net cv::dnn::readNetFromTensorflow(vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>()) 218 // 219 220 /** 221 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format. 222 * @param bufferModel buffer containing the content of the pb file 223 * @param bufferConfig buffer containing the content of the pbtxt file 224 * @return Net object. 225 */ 226 public static Net readNetFromTensorflow(MatOfByte bufferModel, MatOfByte bufferConfig) { 227 Mat bufferModel_mat = bufferModel; 228 Mat bufferConfig_mat = bufferConfig; 229 return new Net(readNetFromTensorflow_2(bufferModel_mat.nativeObj, bufferConfig_mat.nativeObj)); 230 } 231 232 /** 233 * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format. 234 * @param bufferModel buffer containing the content of the pb file 235 * @return Net object. 236 */ 237 public static Net readNetFromTensorflow(MatOfByte bufferModel) { 238 Mat bufferModel_mat = bufferModel; 239 return new Net(readNetFromTensorflow_3(bufferModel_mat.nativeObj)); 240 } 241 242 243 // 244 // C++: Net cv::dnn::readNetFromTFLite(String model) 245 // 246 247 /** 248 * Reads a network model stored in <a href="https://www.tensorflow.org/lite">TFLite</a> framework's format. 249 * @param model path to the .tflite file with binary flatbuffers description of the network architecture 250 * @return Net object. 251 */ 252 public static Net readNetFromTFLite(String model) { 253 return new Net(readNetFromTFLite_0(model)); 254 } 255 256 257 // 258 // C++: Net cv::dnn::readNetFromTFLite(vector_uchar bufferModel) 259 // 260 261 /** 262 * Reads a network model stored in <a href="https://www.tensorflow.org/lite">TFLite</a> framework's format. 263 * @param bufferModel buffer containing the content of the tflite file 264 * @return Net object. 265 */ 266 public static Net readNetFromTFLite(MatOfByte bufferModel) { 267 Mat bufferModel_mat = bufferModel; 268 return new Net(readNetFromTFLite_1(bufferModel_mat.nativeObj)); 269 } 270 271 272 // 273 // C++: Net cv::dnn::readNetFromTorch(String model, bool isBinary = true, bool evaluate = true) 274 // 275 276 /** 277 * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format. 278 * @param model path to the file, dumped from Torch by using torch.save() function. 279 * @param isBinary specifies whether the network was serialized in ascii mode or binary. 280 * @param evaluate specifies testing phase of network. If true, it's similar to evaluate() method in Torch. 281 * @return Net object. 282 * 283 * <b>Note:</b> Ascii mode of Torch serializer is more preferable, because binary mode extensively use {@code long} type of C language, 284 * which has various bit-length on different systems. 285 * 286 * The loading file must contain serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object 287 * with importing network. Try to eliminate a custom objects from serialazing data to avoid importing errors. 288 * 289 * List of supported layers (i.e. object instances derived from Torch nn.Module class): 290 * - nn.Sequential 291 * - nn.Parallel 292 * - nn.Concat 293 * - nn.Linear 294 * - nn.SpatialConvolution 295 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling 296 * - nn.ReLU, nn.TanH, nn.Sigmoid 297 * - nn.Reshape 298 * - nn.SoftMax, nn.LogSoftMax 299 * 300 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported. 301 */ 302 public static Net readNetFromTorch(String model, boolean isBinary, boolean evaluate) { 303 return new Net(readNetFromTorch_0(model, isBinary, evaluate)); 304 } 305 306 /** 307 * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format. 308 * @param model path to the file, dumped from Torch by using torch.save() function. 309 * @param isBinary specifies whether the network was serialized in ascii mode or binary. 310 * @return Net object. 311 * 312 * <b>Note:</b> Ascii mode of Torch serializer is more preferable, because binary mode extensively use {@code long} type of C language, 313 * which has various bit-length on different systems. 314 * 315 * The loading file must contain serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object 316 * with importing network. Try to eliminate a custom objects from serialazing data to avoid importing errors. 317 * 318 * List of supported layers (i.e. object instances derived from Torch nn.Module class): 319 * - nn.Sequential 320 * - nn.Parallel 321 * - nn.Concat 322 * - nn.Linear 323 * - nn.SpatialConvolution 324 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling 325 * - nn.ReLU, nn.TanH, nn.Sigmoid 326 * - nn.Reshape 327 * - nn.SoftMax, nn.LogSoftMax 328 * 329 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported. 330 */ 331 public static Net readNetFromTorch(String model, boolean isBinary) { 332 return new Net(readNetFromTorch_1(model, isBinary)); 333 } 334 335 /** 336 * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format. 337 * @param model path to the file, dumped from Torch by using torch.save() function. 338 * @return Net object. 339 * 340 * <b>Note:</b> Ascii mode of Torch serializer is more preferable, because binary mode extensively use {@code long} type of C language, 341 * which has various bit-length on different systems. 342 * 343 * The loading file must contain serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object 344 * with importing network. Try to eliminate a custom objects from serialazing data to avoid importing errors. 345 * 346 * List of supported layers (i.e. object instances derived from Torch nn.Module class): 347 * - nn.Sequential 348 * - nn.Parallel 349 * - nn.Concat 350 * - nn.Linear 351 * - nn.SpatialConvolution 352 * - nn.SpatialMaxPooling, nn.SpatialAveragePooling 353 * - nn.ReLU, nn.TanH, nn.Sigmoid 354 * - nn.Reshape 355 * - nn.SoftMax, nn.LogSoftMax 356 * 357 * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported. 358 */ 359 public static Net readNetFromTorch(String model) { 360 return new Net(readNetFromTorch_2(model)); 361 } 362 363 364 // 365 // C++: Net cv::dnn::readNet(String model, String config = "", String framework = "") 366 // 367 368 /** 369 * Read deep learning network represented in one of the supported formats. 370 * @param model Binary file contains trained weights. The following file 371 * extensions are expected for models from different frameworks: 372 * * {@code *.caffemodel} (Caffe, http://caffe.berkeleyvision.org/) 373 * * {@code *.pb} (TensorFlow, https://www.tensorflow.org/) 374 * * {@code *.t7} | {@code *.net} (Torch, http://torch.ch/) 375 * * {@code *.weights} (Darknet, https://pjreddie.com/darknet/) 376 * * {@code *.bin} (DLDT, https://software.intel.com/openvino-toolkit) 377 * * {@code *.onnx} (ONNX, https://onnx.ai/) 378 * @param config Text file contains network configuration. It could be a 379 * file with the following extensions: 380 * * {@code *.prototxt} (Caffe, http://caffe.berkeleyvision.org/) 381 * * {@code *.pbtxt} (TensorFlow, https://www.tensorflow.org/) 382 * * {@code *.cfg} (Darknet, https://pjreddie.com/darknet/) 383 * * {@code *.xml} (DLDT, https://software.intel.com/openvino-toolkit) 384 * @param framework Explicit framework name tag to determine a format. 385 * @return Net object. 386 * 387 * This function automatically detects an origin framework of trained model 388 * and calls an appropriate function such REF: readNetFromCaffe, REF: readNetFromTensorflow, 389 * REF: readNetFromTorch or REF: readNetFromDarknet. An order of {@code model} and {@code config} 390 * arguments does not matter. 391 */ 392 public static Net readNet(String model, String config, String framework) { 393 return new Net(readNet_0(model, config, framework)); 394 } 395 396 /** 397 * Read deep learning network represented in one of the supported formats. 398 * @param model Binary file contains trained weights. The following file 399 * extensions are expected for models from different frameworks: 400 * * {@code *.caffemodel} (Caffe, http://caffe.berkeleyvision.org/) 401 * * {@code *.pb} (TensorFlow, https://www.tensorflow.org/) 402 * * {@code *.t7} | {@code *.net} (Torch, http://torch.ch/) 403 * * {@code *.weights} (Darknet, https://pjreddie.com/darknet/) 404 * * {@code *.bin} (DLDT, https://software.intel.com/openvino-toolkit) 405 * * {@code *.onnx} (ONNX, https://onnx.ai/) 406 * @param config Text file contains network configuration. It could be a 407 * file with the following extensions: 408 * * {@code *.prototxt} (Caffe, http://caffe.berkeleyvision.org/) 409 * * {@code *.pbtxt} (TensorFlow, https://www.tensorflow.org/) 410 * * {@code *.cfg} (Darknet, https://pjreddie.com/darknet/) 411 * * {@code *.xml} (DLDT, https://software.intel.com/openvino-toolkit) 412 * @return Net object. 413 * 414 * This function automatically detects an origin framework of trained model 415 * and calls an appropriate function such REF: readNetFromCaffe, REF: readNetFromTensorflow, 416 * REF: readNetFromTorch or REF: readNetFromDarknet. An order of {@code model} and {@code config} 417 * arguments does not matter. 418 */ 419 public static Net readNet(String model, String config) { 420 return new Net(readNet_1(model, config)); 421 } 422 423 /** 424 * Read deep learning network represented in one of the supported formats. 425 * @param model Binary file contains trained weights. The following file 426 * extensions are expected for models from different frameworks: 427 * * {@code *.caffemodel} (Caffe, http://caffe.berkeleyvision.org/) 428 * * {@code *.pb} (TensorFlow, https://www.tensorflow.org/) 429 * * {@code *.t7} | {@code *.net} (Torch, http://torch.ch/) 430 * * {@code *.weights} (Darknet, https://pjreddie.com/darknet/) 431 * * {@code *.bin} (DLDT, https://software.intel.com/openvino-toolkit) 432 * * {@code *.onnx} (ONNX, https://onnx.ai/) 433 * file with the following extensions: 434 * * {@code *.prototxt} (Caffe, http://caffe.berkeleyvision.org/) 435 * * {@code *.pbtxt} (TensorFlow, https://www.tensorflow.org/) 436 * * {@code *.cfg} (Darknet, https://pjreddie.com/darknet/) 437 * * {@code *.xml} (DLDT, https://software.intel.com/openvino-toolkit) 438 * @return Net object. 439 * 440 * This function automatically detects an origin framework of trained model 441 * and calls an appropriate function such REF: readNetFromCaffe, REF: readNetFromTensorflow, 442 * REF: readNetFromTorch or REF: readNetFromDarknet. An order of {@code model} and {@code config} 443 * arguments does not matter. 444 */ 445 public static Net readNet(String model) { 446 return new Net(readNet_2(model)); 447 } 448 449 450 // 451 // C++: Net cv::dnn::readNet(String framework, vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>()) 452 // 453 454 /** 455 * Read deep learning network represented in one of the supported formats. 456 * This is an overloaded member function, provided for convenience. 457 * It differs from the above function only in what argument(s) it accepts. 458 * @param framework Name of origin framework. 459 * @param bufferModel A buffer with a content of binary file with weights 460 * @param bufferConfig A buffer with a content of text file contains network configuration. 461 * @return Net object. 462 */ 463 public static Net readNet(String framework, MatOfByte bufferModel, MatOfByte bufferConfig) { 464 Mat bufferModel_mat = bufferModel; 465 Mat bufferConfig_mat = bufferConfig; 466 return new Net(readNet_3(framework, bufferModel_mat.nativeObj, bufferConfig_mat.nativeObj)); 467 } 468 469 /** 470 * Read deep learning network represented in one of the supported formats. 471 * This is an overloaded member function, provided for convenience. 472 * It differs from the above function only in what argument(s) it accepts. 473 * @param framework Name of origin framework. 474 * @param bufferModel A buffer with a content of binary file with weights 475 * @return Net object. 476 */ 477 public static Net readNet(String framework, MatOfByte bufferModel) { 478 Mat bufferModel_mat = bufferModel; 479 return new Net(readNet_4(framework, bufferModel_mat.nativeObj)); 480 } 481 482 483 // 484 // C++: Mat cv::dnn::readTorchBlob(String filename, bool isBinary = true) 485 // 486 487 /** 488 * Loads blob which was serialized as torch.Tensor object of Torch7 framework. 489 * WARNING: This function has the same limitations as readNetFromTorch(). 490 * @param filename automatically generated 491 * @param isBinary automatically generated 492 * @return automatically generated 493 */ 494 public static Mat readTorchBlob(String filename, boolean isBinary) { 495 return new Mat(readTorchBlob_0(filename, isBinary)); 496 } 497 498 /** 499 * Loads blob which was serialized as torch.Tensor object of Torch7 framework. 500 * WARNING: This function has the same limitations as readNetFromTorch(). 501 * @param filename automatically generated 502 * @return automatically generated 503 */ 504 public static Mat readTorchBlob(String filename) { 505 return new Mat(readTorchBlob_1(filename)); 506 } 507 508 509 // 510 // C++: Net cv::dnn::readNetFromModelOptimizer(String xml, String bin) 511 // 512 513 /** 514 * Load a network from Intel's Model Optimizer intermediate representation. 515 * @param xml XML configuration file with network's topology. 516 * @param bin Binary file with trained weights. 517 * @return Net object. 518 * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine 519 * backend. 520 */ 521 public static Net readNetFromModelOptimizer(String xml, String bin) { 522 return new Net(readNetFromModelOptimizer_0(xml, bin)); 523 } 524 525 526 // 527 // C++: Net cv::dnn::readNetFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights) 528 // 529 530 /** 531 * Load a network from Intel's Model Optimizer intermediate representation. 532 * @param bufferModelConfig Buffer contains XML configuration with network's topology. 533 * @param bufferWeights Buffer contains binary data with trained weights. 534 * @return Net object. 535 * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine 536 * backend. 537 */ 538 public static Net readNetFromModelOptimizer(MatOfByte bufferModelConfig, MatOfByte bufferWeights) { 539 Mat bufferModelConfig_mat = bufferModelConfig; 540 Mat bufferWeights_mat = bufferWeights; 541 return new Net(readNetFromModelOptimizer_1(bufferModelConfig_mat.nativeObj, bufferWeights_mat.nativeObj)); 542 } 543 544 545 // 546 // C++: Net cv::dnn::readNetFromONNX(String onnxFile) 547 // 548 549 /** 550 * Reads a network model <a href="https://onnx.ai/">ONNX</a>. 551 * @param onnxFile path to the .onnx file with text description of the network architecture. 552 * @return Network object that ready to do forward, throw an exception in failure cases. 553 */ 554 public static Net readNetFromONNX(String onnxFile) { 555 return new Net(readNetFromONNX_0(onnxFile)); 556 } 557 558 559 // 560 // C++: Net cv::dnn::readNetFromONNX(vector_uchar buffer) 561 // 562 563 /** 564 * Reads a network model from <a href="https://onnx.ai/">ONNX</a> 565 * in-memory buffer. 566 * @param buffer in-memory buffer that stores the ONNX model bytes. 567 * @return Network object that ready to do forward, throw an exception 568 * in failure cases. 569 */ 570 public static Net readNetFromONNX(MatOfByte buffer) { 571 Mat buffer_mat = buffer; 572 return new Net(readNetFromONNX_1(buffer_mat.nativeObj)); 573 } 574 575 576 // 577 // C++: Mat cv::dnn::readTensorFromONNX(String path) 578 // 579 580 /** 581 * Creates blob from .pb file. 582 * @param path to the .pb file with input tensor. 583 * @return Mat. 584 */ 585 public static Mat readTensorFromONNX(String path) { 586 return new Mat(readTensorFromONNX_0(path)); 587 } 588 589 590 // 591 // C++: Mat cv::dnn::blobFromImage(Mat image, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F) 592 // 593 594 /** 595 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 596 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 597 * @param image input image (with 1-, 3- or 4-channels). 598 * @param scalefactor multiplier for {@code images} values. 599 * @param size spatial size for output image 600 * @param mean scalar with mean values which are subtracted from channels. Values are intended 601 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 602 * @param swapRB flag which indicates that swap first and last channels 603 * in 3-channel image is necessary. 604 * @param crop flag which indicates whether image will be cropped after resize or not 605 * @param ddepth Depth of output blob. Choose CV_32F or CV_8U. 606 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 607 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 608 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 609 * @return 4-dimensional Mat with NCHW dimensions order. 610 * 611 * <b>Note:</b> 612 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 613 */ 614 public static Mat blobFromImage(Mat image, double scalefactor, Size size, Scalar mean, boolean swapRB, boolean crop, int ddepth) { 615 return new Mat(blobFromImage_0(image.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB, crop, ddepth)); 616 } 617 618 /** 619 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 620 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 621 * @param image input image (with 1-, 3- or 4-channels). 622 * @param scalefactor multiplier for {@code images} values. 623 * @param size spatial size for output image 624 * @param mean scalar with mean values which are subtracted from channels. Values are intended 625 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 626 * @param swapRB flag which indicates that swap first and last channels 627 * in 3-channel image is necessary. 628 * @param crop flag which indicates whether image will be cropped after resize or not 629 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 630 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 631 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 632 * @return 4-dimensional Mat with NCHW dimensions order. 633 * 634 * <b>Note:</b> 635 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 636 */ 637 public static Mat blobFromImage(Mat image, double scalefactor, Size size, Scalar mean, boolean swapRB, boolean crop) { 638 return new Mat(blobFromImage_1(image.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB, crop)); 639 } 640 641 /** 642 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 643 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 644 * @param image input image (with 1-, 3- or 4-channels). 645 * @param scalefactor multiplier for {@code images} values. 646 * @param size spatial size for output image 647 * @param mean scalar with mean values which are subtracted from channels. Values are intended 648 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 649 * @param swapRB flag which indicates that swap first and last channels 650 * in 3-channel image is necessary. 651 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 652 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 653 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 654 * @return 4-dimensional Mat with NCHW dimensions order. 655 * 656 * <b>Note:</b> 657 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 658 */ 659 public static Mat blobFromImage(Mat image, double scalefactor, Size size, Scalar mean, boolean swapRB) { 660 return new Mat(blobFromImage_2(image.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB)); 661 } 662 663 /** 664 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 665 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 666 * @param image input image (with 1-, 3- or 4-channels). 667 * @param scalefactor multiplier for {@code images} values. 668 * @param size spatial size for output image 669 * @param mean scalar with mean values which are subtracted from channels. Values are intended 670 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 671 * in 3-channel image is necessary. 672 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 673 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 674 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 675 * @return 4-dimensional Mat with NCHW dimensions order. 676 * 677 * <b>Note:</b> 678 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 679 */ 680 public static Mat blobFromImage(Mat image, double scalefactor, Size size, Scalar mean) { 681 return new Mat(blobFromImage_3(image.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3])); 682 } 683 684 /** 685 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 686 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 687 * @param image input image (with 1-, 3- or 4-channels). 688 * @param scalefactor multiplier for {@code images} values. 689 * @param size spatial size for output image 690 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 691 * in 3-channel image is necessary. 692 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 693 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 694 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 695 * @return 4-dimensional Mat with NCHW dimensions order. 696 * 697 * <b>Note:</b> 698 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 699 */ 700 public static Mat blobFromImage(Mat image, double scalefactor, Size size) { 701 return new Mat(blobFromImage_4(image.nativeObj, scalefactor, size.width, size.height)); 702 } 703 704 /** 705 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 706 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 707 * @param image input image (with 1-, 3- or 4-channels). 708 * @param scalefactor multiplier for {@code images} values. 709 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 710 * in 3-channel image is necessary. 711 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 712 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 713 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 714 * @return 4-dimensional Mat with NCHW dimensions order. 715 * 716 * <b>Note:</b> 717 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 718 */ 719 public static Mat blobFromImage(Mat image, double scalefactor) { 720 return new Mat(blobFromImage_5(image.nativeObj, scalefactor)); 721 } 722 723 /** 724 * Creates 4-dimensional blob from image. Optionally resizes and crops {@code image} from center, 725 * subtract {@code mean} values, scales values by {@code scalefactor}, swap Blue and Red channels. 726 * @param image input image (with 1-, 3- or 4-channels). 727 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 728 * in 3-channel image is necessary. 729 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 730 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 731 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 732 * @return 4-dimensional Mat with NCHW dimensions order. 733 * 734 * <b>Note:</b> 735 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 736 */ 737 public static Mat blobFromImage(Mat image) { 738 return new Mat(blobFromImage_6(image.nativeObj)); 739 } 740 741 742 // 743 // C++: Mat cv::dnn::blobFromImages(vector_Mat images, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F) 744 // 745 746 /** 747 * Creates 4-dimensional blob from series of images. Optionally resizes and 748 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 749 * swap Blue and Red channels. 750 * @param images input images (all with 1-, 3- or 4-channels). 751 * @param size spatial size for output image 752 * @param mean scalar with mean values which are subtracted from channels. Values are intended 753 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 754 * @param scalefactor multiplier for {@code images} values. 755 * @param swapRB flag which indicates that swap first and last channels 756 * in 3-channel image is necessary. 757 * @param crop flag which indicates whether image will be cropped after resize or not 758 * @param ddepth Depth of output blob. Choose CV_32F or CV_8U. 759 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 760 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 761 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 762 * @return 4-dimensional Mat with NCHW dimensions order. 763 * 764 * <b>Note:</b> 765 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 766 */ 767 public static Mat blobFromImages(List<Mat> images, double scalefactor, Size size, Scalar mean, boolean swapRB, boolean crop, int ddepth) { 768 Mat images_mat = Converters.vector_Mat_to_Mat(images); 769 return new Mat(blobFromImages_0(images_mat.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB, crop, ddepth)); 770 } 771 772 /** 773 * Creates 4-dimensional blob from series of images. Optionally resizes and 774 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 775 * swap Blue and Red channels. 776 * @param images input images (all with 1-, 3- or 4-channels). 777 * @param size spatial size for output image 778 * @param mean scalar with mean values which are subtracted from channels. Values are intended 779 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 780 * @param scalefactor multiplier for {@code images} values. 781 * @param swapRB flag which indicates that swap first and last channels 782 * in 3-channel image is necessary. 783 * @param crop flag which indicates whether image will be cropped after resize or not 784 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 785 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 786 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 787 * @return 4-dimensional Mat with NCHW dimensions order. 788 * 789 * <b>Note:</b> 790 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 791 */ 792 public static Mat blobFromImages(List<Mat> images, double scalefactor, Size size, Scalar mean, boolean swapRB, boolean crop) { 793 Mat images_mat = Converters.vector_Mat_to_Mat(images); 794 return new Mat(blobFromImages_1(images_mat.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB, crop)); 795 } 796 797 /** 798 * Creates 4-dimensional blob from series of images. Optionally resizes and 799 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 800 * swap Blue and Red channels. 801 * @param images input images (all with 1-, 3- or 4-channels). 802 * @param size spatial size for output image 803 * @param mean scalar with mean values which are subtracted from channels. Values are intended 804 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 805 * @param scalefactor multiplier for {@code images} values. 806 * @param swapRB flag which indicates that swap first and last channels 807 * in 3-channel image is necessary. 808 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 809 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 810 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 811 * @return 4-dimensional Mat with NCHW dimensions order. 812 * 813 * <b>Note:</b> 814 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 815 */ 816 public static Mat blobFromImages(List<Mat> images, double scalefactor, Size size, Scalar mean, boolean swapRB) { 817 Mat images_mat = Converters.vector_Mat_to_Mat(images); 818 return new Mat(blobFromImages_2(images_mat.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB)); 819 } 820 821 /** 822 * Creates 4-dimensional blob from series of images. Optionally resizes and 823 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 824 * swap Blue and Red channels. 825 * @param images input images (all with 1-, 3- or 4-channels). 826 * @param size spatial size for output image 827 * @param mean scalar with mean values which are subtracted from channels. Values are intended 828 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 829 * @param scalefactor multiplier for {@code images} values. 830 * in 3-channel image is necessary. 831 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 832 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 833 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 834 * @return 4-dimensional Mat with NCHW dimensions order. 835 * 836 * <b>Note:</b> 837 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 838 */ 839 public static Mat blobFromImages(List<Mat> images, double scalefactor, Size size, Scalar mean) { 840 Mat images_mat = Converters.vector_Mat_to_Mat(images); 841 return new Mat(blobFromImages_3(images_mat.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3])); 842 } 843 844 /** 845 * Creates 4-dimensional blob from series of images. Optionally resizes and 846 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 847 * swap Blue and Red channels. 848 * @param images input images (all with 1-, 3- or 4-channels). 849 * @param size spatial size for output image 850 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 851 * @param scalefactor multiplier for {@code images} values. 852 * in 3-channel image is necessary. 853 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 854 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 855 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 856 * @return 4-dimensional Mat with NCHW dimensions order. 857 * 858 * <b>Note:</b> 859 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 860 */ 861 public static Mat blobFromImages(List<Mat> images, double scalefactor, Size size) { 862 Mat images_mat = Converters.vector_Mat_to_Mat(images); 863 return new Mat(blobFromImages_4(images_mat.nativeObj, scalefactor, size.width, size.height)); 864 } 865 866 /** 867 * Creates 4-dimensional blob from series of images. Optionally resizes and 868 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 869 * swap Blue and Red channels. 870 * @param images input images (all with 1-, 3- or 4-channels). 871 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 872 * @param scalefactor multiplier for {@code images} values. 873 * in 3-channel image is necessary. 874 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 875 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 876 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 877 * @return 4-dimensional Mat with NCHW dimensions order. 878 * 879 * <b>Note:</b> 880 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 881 */ 882 public static Mat blobFromImages(List<Mat> images, double scalefactor) { 883 Mat images_mat = Converters.vector_Mat_to_Mat(images); 884 return new Mat(blobFromImages_5(images_mat.nativeObj, scalefactor)); 885 } 886 887 /** 888 * Creates 4-dimensional blob from series of images. Optionally resizes and 889 * crops {@code images} from center, subtract {@code mean} values, scales values by {@code scalefactor}, 890 * swap Blue and Red channels. 891 * @param images input images (all with 1-, 3- or 4-channels). 892 * to be in (mean-R, mean-G, mean-B) order if {@code image} has BGR ordering and {@code swapRB} is true. 893 * in 3-channel image is necessary. 894 * if {@code crop} is true, input image is resized so one side after resize is equal to corresponding 895 * dimension in {@code size} and another one is equal or larger. Then, crop from the center is performed. 896 * If {@code crop} is false, direct resize without cropping and preserving aspect ratio is performed. 897 * @return 4-dimensional Mat with NCHW dimensions order. 898 * 899 * <b>Note:</b> 900 * The order and usage of {@code scalefactor} and {@code mean} are (input - mean) * scalefactor. 901 */ 902 public static Mat blobFromImages(List<Mat> images) { 903 Mat images_mat = Converters.vector_Mat_to_Mat(images); 904 return new Mat(blobFromImages_6(images_mat.nativeObj)); 905 } 906 907 908 // 909 // C++: Mat cv::dnn::blobFromImageWithParams(Mat image, Image2BlobParams param = Image2BlobParams()) 910 // 911 912 /** 913 * Creates 4-dimensional blob from image with given params. 914 * 915 * This function is an extension of REF: blobFromImage to meet more image preprocess needs. 916 * Given input image and preprocessing parameters, and function outputs the blob. 917 * 918 * @param image input image (all with 1-, 3- or 4-channels). 919 * @param param struct of Image2BlobParams, contains all parameters needed by processing of image to blob. 920 * @return 4-dimensional Mat. 921 */ 922 public static Mat blobFromImageWithParams(Mat image, Image2BlobParams param) { 923 return new Mat(blobFromImageWithParams_0(image.nativeObj, param.nativeObj)); 924 } 925 926 /** 927 * Creates 4-dimensional blob from image with given params. 928 * 929 * This function is an extension of REF: blobFromImage to meet more image preprocess needs. 930 * Given input image and preprocessing parameters, and function outputs the blob. 931 * 932 * @param image input image (all with 1-, 3- or 4-channels). 933 * @return 4-dimensional Mat. 934 */ 935 public static Mat blobFromImageWithParams(Mat image) { 936 return new Mat(blobFromImageWithParams_1(image.nativeObj)); 937 } 938 939 940 // 941 // C++: void cv::dnn::blobFromImageWithParams(Mat image, Mat& blob, Image2BlobParams param = Image2BlobParams()) 942 // 943 944 public static void blobFromImageWithParams(Mat image, Mat blob, Image2BlobParams param) { 945 blobFromImageWithParams_2(image.nativeObj, blob.nativeObj, param.nativeObj); 946 } 947 948 public static void blobFromImageWithParams(Mat image, Mat blob) { 949 blobFromImageWithParams_3(image.nativeObj, blob.nativeObj); 950 } 951 952 953 // 954 // C++: Mat cv::dnn::blobFromImagesWithParams(vector_Mat images, Image2BlobParams param = Image2BlobParams()) 955 // 956 957 /** 958 * Creates 4-dimensional blob from series of images with given params. 959 * 960 * This function is an extension of REF: blobFromImages to meet more image preprocess needs. 961 * Given input image and preprocessing parameters, and function outputs the blob. 962 * 963 * @param images input image (all with 1-, 3- or 4-channels). 964 * @param param struct of Image2BlobParams, contains all parameters needed by processing of image to blob. 965 * @return 4-dimensional Mat. 966 */ 967 public static Mat blobFromImagesWithParams(List<Mat> images, Image2BlobParams param) { 968 Mat images_mat = Converters.vector_Mat_to_Mat(images); 969 return new Mat(blobFromImagesWithParams_0(images_mat.nativeObj, param.nativeObj)); 970 } 971 972 /** 973 * Creates 4-dimensional blob from series of images with given params. 974 * 975 * This function is an extension of REF: blobFromImages to meet more image preprocess needs. 976 * Given input image and preprocessing parameters, and function outputs the blob. 977 * 978 * @param images input image (all with 1-, 3- or 4-channels). 979 * @return 4-dimensional Mat. 980 */ 981 public static Mat blobFromImagesWithParams(List<Mat> images) { 982 Mat images_mat = Converters.vector_Mat_to_Mat(images); 983 return new Mat(blobFromImagesWithParams_1(images_mat.nativeObj)); 984 } 985 986 987 // 988 // C++: void cv::dnn::blobFromImagesWithParams(vector_Mat images, Mat& blob, Image2BlobParams param = Image2BlobParams()) 989 // 990 991 public static void blobFromImagesWithParams(List<Mat> images, Mat blob, Image2BlobParams param) { 992 Mat images_mat = Converters.vector_Mat_to_Mat(images); 993 blobFromImagesWithParams_2(images_mat.nativeObj, blob.nativeObj, param.nativeObj); 994 } 995 996 public static void blobFromImagesWithParams(List<Mat> images, Mat blob) { 997 Mat images_mat = Converters.vector_Mat_to_Mat(images); 998 blobFromImagesWithParams_3(images_mat.nativeObj, blob.nativeObj); 999 } 1000 1001 1002 // 1003 // C++: void cv::dnn::imagesFromBlob(Mat blob_, vector_Mat& images_) 1004 // 1005 1006 /** 1007 * Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure 1008 * (std::vector<cv::Mat>). 1009 * @param blob_ 4 dimensional array (images, channels, height, width) in floating point precision (CV_32F) from 1010 * which you would like to extract the images. 1011 * @param images_ array of 2D Mat containing the images extracted from the blob in floating point precision 1012 * (CV_32F). They are non normalized neither mean added. The number of returned images equals the first dimension 1013 * of the blob (batch size). Every image has a number of channels equals to the second dimension of the blob (depth). 1014 */ 1015 public static void imagesFromBlob(Mat blob_, List<Mat> images_) { 1016 Mat images__mat = new Mat(); 1017 imagesFromBlob_0(blob_.nativeObj, images__mat.nativeObj); 1018 Converters.Mat_to_vector_Mat(images__mat, images_); 1019 images__mat.release(); 1020 } 1021 1022 1023 // 1024 // C++: void cv::dnn::shrinkCaffeModel(String src, String dst, vector_String layersTypes = std::vector<String>()) 1025 // 1026 1027 /** 1028 * Convert all weights of Caffe network to half precision floating point. 1029 * @param src Path to origin model from Caffe framework contains single 1030 * precision floating point weights (usually has {@code .caffemodel} extension). 1031 * @param dst Path to destination model with updated weights. 1032 * @param layersTypes Set of layers types which parameters will be converted. 1033 * By default, converts only Convolutional and Fully-Connected layers' 1034 * weights. 1035 * 1036 * <b>Note:</b> Shrinked model has no origin float32 weights so it can't be used 1037 * in origin Caffe framework anymore. However the structure of data 1038 * is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe. 1039 * So the resulting model may be used there. 1040 */ 1041 public static void shrinkCaffeModel(String src, String dst, List<String> layersTypes) { 1042 shrinkCaffeModel_0(src, dst, layersTypes); 1043 } 1044 1045 /** 1046 * Convert all weights of Caffe network to half precision floating point. 1047 * @param src Path to origin model from Caffe framework contains single 1048 * precision floating point weights (usually has {@code .caffemodel} extension). 1049 * @param dst Path to destination model with updated weights. 1050 * By default, converts only Convolutional and Fully-Connected layers' 1051 * weights. 1052 * 1053 * <b>Note:</b> Shrinked model has no origin float32 weights so it can't be used 1054 * in origin Caffe framework anymore. However the structure of data 1055 * is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe. 1056 * So the resulting model may be used there. 1057 */ 1058 public static void shrinkCaffeModel(String src, String dst) { 1059 shrinkCaffeModel_1(src, dst); 1060 } 1061 1062 1063 // 1064 // C++: void cv::dnn::writeTextGraph(String model, String output) 1065 // 1066 1067 /** 1068 * Create a text representation for a binary network stored in protocol buffer format. 1069 * @param model A path to binary network. 1070 * @param output A path to output text file to be created. 1071 * 1072 * <b>Note:</b> To reduce output file size, trained weights are not included. 1073 */ 1074 public static void writeTextGraph(String model, String output) { 1075 writeTextGraph_0(model, output); 1076 } 1077 1078 1079 // 1080 // C++: void cv::dnn::NMSBoxes(vector_Rect2d bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0) 1081 // 1082 1083 /** 1084 * Performs non maximum suppression given boxes and corresponding scores. 1085 * 1086 * @param bboxes a set of bounding boxes to apply NMS. 1087 * @param scores a set of corresponding confidences. 1088 * @param score_threshold a threshold used to filter boxes by score. 1089 * @param nms_threshold a threshold used in non maximum suppression. 1090 * @param indices the kept indices of bboxes after NMS. 1091 * @param eta a coefficient in adaptive threshold formula: \(nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\). 1092 * @param top_k if {@code >0}, keep at most {@code top_k} picked indices. 1093 */ 1094 public static void NMSBoxes(MatOfRect2d bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices, float eta, int top_k) { 1095 Mat bboxes_mat = bboxes; 1096 Mat scores_mat = scores; 1097 Mat indices_mat = indices; 1098 NMSBoxes_0(bboxes_mat.nativeObj, scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, eta, top_k); 1099 } 1100 1101 /** 1102 * Performs non maximum suppression given boxes and corresponding scores. 1103 * 1104 * @param bboxes a set of bounding boxes to apply NMS. 1105 * @param scores a set of corresponding confidences. 1106 * @param score_threshold a threshold used to filter boxes by score. 1107 * @param nms_threshold a threshold used in non maximum suppression. 1108 * @param indices the kept indices of bboxes after NMS. 1109 * @param eta a coefficient in adaptive threshold formula: \(nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\). 1110 */ 1111 public static void NMSBoxes(MatOfRect2d bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices, float eta) { 1112 Mat bboxes_mat = bboxes; 1113 Mat scores_mat = scores; 1114 Mat indices_mat = indices; 1115 NMSBoxes_1(bboxes_mat.nativeObj, scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, eta); 1116 } 1117 1118 /** 1119 * Performs non maximum suppression given boxes and corresponding scores. 1120 * 1121 * @param bboxes a set of bounding boxes to apply NMS. 1122 * @param scores a set of corresponding confidences. 1123 * @param score_threshold a threshold used to filter boxes by score. 1124 * @param nms_threshold a threshold used in non maximum suppression. 1125 * @param indices the kept indices of bboxes after NMS. 1126 */ 1127 public static void NMSBoxes(MatOfRect2d bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices) { 1128 Mat bboxes_mat = bboxes; 1129 Mat scores_mat = scores; 1130 Mat indices_mat = indices; 1131 NMSBoxes_2(bboxes_mat.nativeObj, scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj); 1132 } 1133 1134 1135 // 1136 // C++: void cv::dnn::NMSBoxes(vector_RotatedRect bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0) 1137 // 1138 1139 public static void NMSBoxesRotated(MatOfRotatedRect bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices, float eta, int top_k) { 1140 Mat bboxes_mat = bboxes; 1141 Mat scores_mat = scores; 1142 Mat indices_mat = indices; 1143 NMSBoxesRotated_0(bboxes_mat.nativeObj, scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, eta, top_k); 1144 } 1145 1146 public static void NMSBoxesRotated(MatOfRotatedRect bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices, float eta) { 1147 Mat bboxes_mat = bboxes; 1148 Mat scores_mat = scores; 1149 Mat indices_mat = indices; 1150 NMSBoxesRotated_1(bboxes_mat.nativeObj, scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, eta); 1151 } 1152 1153 public static void NMSBoxesRotated(MatOfRotatedRect bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices) { 1154 Mat bboxes_mat = bboxes; 1155 Mat scores_mat = scores; 1156 Mat indices_mat = indices; 1157 NMSBoxesRotated_2(bboxes_mat.nativeObj, scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj); 1158 } 1159 1160 1161 // 1162 // C++: void cv::dnn::NMSBoxesBatched(vector_Rect2d bboxes, vector_float scores, vector_int class_ids, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0) 1163 // 1164 1165 /** 1166 * Performs batched non maximum suppression on given boxes and corresponding scores across different classes. 1167 * 1168 * @param bboxes a set of bounding boxes to apply NMS. 1169 * @param scores a set of corresponding confidences. 1170 * @param class_ids a set of corresponding class ids. Ids are integer and usually start from 0. 1171 * @param score_threshold a threshold used to filter boxes by score. 1172 * @param nms_threshold a threshold used in non maximum suppression. 1173 * @param indices the kept indices of bboxes after NMS. 1174 * @param eta a coefficient in adaptive threshold formula: \(nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\). 1175 * @param top_k if {@code >0}, keep at most {@code top_k} picked indices. 1176 */ 1177 public static void NMSBoxesBatched(MatOfRect2d bboxes, MatOfFloat scores, MatOfInt class_ids, float score_threshold, float nms_threshold, MatOfInt indices, float eta, int top_k) { 1178 Mat bboxes_mat = bboxes; 1179 Mat scores_mat = scores; 1180 Mat class_ids_mat = class_ids; 1181 Mat indices_mat = indices; 1182 NMSBoxesBatched_0(bboxes_mat.nativeObj, scores_mat.nativeObj, class_ids_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, eta, top_k); 1183 } 1184 1185 /** 1186 * Performs batched non maximum suppression on given boxes and corresponding scores across different classes. 1187 * 1188 * @param bboxes a set of bounding boxes to apply NMS. 1189 * @param scores a set of corresponding confidences. 1190 * @param class_ids a set of corresponding class ids. Ids are integer and usually start from 0. 1191 * @param score_threshold a threshold used to filter boxes by score. 1192 * @param nms_threshold a threshold used in non maximum suppression. 1193 * @param indices the kept indices of bboxes after NMS. 1194 * @param eta a coefficient in adaptive threshold formula: \(nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\). 1195 */ 1196 public static void NMSBoxesBatched(MatOfRect2d bboxes, MatOfFloat scores, MatOfInt class_ids, float score_threshold, float nms_threshold, MatOfInt indices, float eta) { 1197 Mat bboxes_mat = bboxes; 1198 Mat scores_mat = scores; 1199 Mat class_ids_mat = class_ids; 1200 Mat indices_mat = indices; 1201 NMSBoxesBatched_1(bboxes_mat.nativeObj, scores_mat.nativeObj, class_ids_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, eta); 1202 } 1203 1204 /** 1205 * Performs batched non maximum suppression on given boxes and corresponding scores across different classes. 1206 * 1207 * @param bboxes a set of bounding boxes to apply NMS. 1208 * @param scores a set of corresponding confidences. 1209 * @param class_ids a set of corresponding class ids. Ids are integer and usually start from 0. 1210 * @param score_threshold a threshold used to filter boxes by score. 1211 * @param nms_threshold a threshold used in non maximum suppression. 1212 * @param indices the kept indices of bboxes after NMS. 1213 */ 1214 public static void NMSBoxesBatched(MatOfRect2d bboxes, MatOfFloat scores, MatOfInt class_ids, float score_threshold, float nms_threshold, MatOfInt indices) { 1215 Mat bboxes_mat = bboxes; 1216 Mat scores_mat = scores; 1217 Mat class_ids_mat = class_ids; 1218 Mat indices_mat = indices; 1219 NMSBoxesBatched_2(bboxes_mat.nativeObj, scores_mat.nativeObj, class_ids_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj); 1220 } 1221 1222 1223 // 1224 // C++: void cv::dnn::softNMSBoxes(vector_Rect bboxes, vector_float scores, vector_float& updated_scores, float score_threshold, float nms_threshold, vector_int& indices, size_t top_k = 0, float sigma = 0.5, SoftNMSMethod method = SoftNMSMethod::SOFTNMS_GAUSSIAN) 1225 // 1226 1227 /** 1228 * Performs soft non maximum suppression given boxes and corresponding scores. 1229 * Reference: https://arxiv.org/abs/1704.04503 1230 * @param bboxes a set of bounding boxes to apply Soft NMS. 1231 * @param scores a set of corresponding confidences. 1232 * @param updated_scores a set of corresponding updated confidences. 1233 * @param score_threshold a threshold used to filter boxes by score. 1234 * @param nms_threshold a threshold used in non maximum suppression. 1235 * @param indices the kept indices of bboxes after NMS. 1236 * @param top_k keep at most {@code top_k} picked indices. 1237 * @param sigma parameter of Gaussian weighting. 1238 * SEE: SoftNMSMethod 1239 */ 1240 public static void softNMSBoxes(MatOfRect bboxes, MatOfFloat scores, MatOfFloat updated_scores, float score_threshold, float nms_threshold, MatOfInt indices, long top_k, float sigma) { 1241 Mat bboxes_mat = bboxes; 1242 Mat scores_mat = scores; 1243 Mat updated_scores_mat = updated_scores; 1244 Mat indices_mat = indices; 1245 softNMSBoxes_0(bboxes_mat.nativeObj, scores_mat.nativeObj, updated_scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, top_k, sigma); 1246 } 1247 1248 /** 1249 * Performs soft non maximum suppression given boxes and corresponding scores. 1250 * Reference: https://arxiv.org/abs/1704.04503 1251 * @param bboxes a set of bounding boxes to apply Soft NMS. 1252 * @param scores a set of corresponding confidences. 1253 * @param updated_scores a set of corresponding updated confidences. 1254 * @param score_threshold a threshold used to filter boxes by score. 1255 * @param nms_threshold a threshold used in non maximum suppression. 1256 * @param indices the kept indices of bboxes after NMS. 1257 * @param top_k keep at most {@code top_k} picked indices. 1258 * SEE: SoftNMSMethod 1259 */ 1260 public static void softNMSBoxes(MatOfRect bboxes, MatOfFloat scores, MatOfFloat updated_scores, float score_threshold, float nms_threshold, MatOfInt indices, long top_k) { 1261 Mat bboxes_mat = bboxes; 1262 Mat scores_mat = scores; 1263 Mat updated_scores_mat = updated_scores; 1264 Mat indices_mat = indices; 1265 softNMSBoxes_2(bboxes_mat.nativeObj, scores_mat.nativeObj, updated_scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj, top_k); 1266 } 1267 1268 /** 1269 * Performs soft non maximum suppression given boxes and corresponding scores. 1270 * Reference: https://arxiv.org/abs/1704.04503 1271 * @param bboxes a set of bounding boxes to apply Soft NMS. 1272 * @param scores a set of corresponding confidences. 1273 * @param updated_scores a set of corresponding updated confidences. 1274 * @param score_threshold a threshold used to filter boxes by score. 1275 * @param nms_threshold a threshold used in non maximum suppression. 1276 * @param indices the kept indices of bboxes after NMS. 1277 * SEE: SoftNMSMethod 1278 */ 1279 public static void softNMSBoxes(MatOfRect bboxes, MatOfFloat scores, MatOfFloat updated_scores, float score_threshold, float nms_threshold, MatOfInt indices) { 1280 Mat bboxes_mat = bboxes; 1281 Mat scores_mat = scores; 1282 Mat updated_scores_mat = updated_scores; 1283 Mat indices_mat = indices; 1284 softNMSBoxes_3(bboxes_mat.nativeObj, scores_mat.nativeObj, updated_scores_mat.nativeObj, score_threshold, nms_threshold, indices_mat.nativeObj); 1285 } 1286 1287 1288 // 1289 // C++: String cv::dnn::getInferenceEngineBackendType() 1290 // 1291 1292 /** 1293 * Returns Inference Engine internal backend API. 1294 * 1295 * See values of {@code CV_DNN_BACKEND_INFERENCE_ENGINE_*} macros. 1296 * 1297 * {@code OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE} runtime parameter (environment variable) is ignored since 4.6.0. 1298 * 1299 * @deprecated 1300 * @return automatically generated 1301 */ 1302 @Deprecated 1303 public static String getInferenceEngineBackendType() { 1304 return getInferenceEngineBackendType_0(); 1305 } 1306 1307 1308 // 1309 // C++: String cv::dnn::setInferenceEngineBackendType(String newBackendType) 1310 // 1311 1312 /** 1313 * Specify Inference Engine internal backend API. 1314 * 1315 * See values of {@code CV_DNN_BACKEND_INFERENCE_ENGINE_*} macros. 1316 * 1317 * @return previous value of internal backend API 1318 * 1319 * @deprecated 1320 * @param newBackendType automatically generated 1321 */ 1322 @Deprecated 1323 public static String setInferenceEngineBackendType(String newBackendType) { 1324 return setInferenceEngineBackendType_0(newBackendType); 1325 } 1326 1327 1328 // 1329 // C++: void cv::dnn::resetMyriadDevice() 1330 // 1331 1332 /** 1333 * Release a Myriad device (binded by OpenCV). 1334 * 1335 * Single Myriad device cannot be shared across multiple processes which uses 1336 * Inference Engine's Myriad plugin. 1337 */ 1338 public static void resetMyriadDevice() { 1339 resetMyriadDevice_0(); 1340 } 1341 1342 1343 // 1344 // C++: String cv::dnn::getInferenceEngineVPUType() 1345 // 1346 1347 /** 1348 * Returns Inference Engine VPU type. 1349 * 1350 * See values of {@code CV_DNN_INFERENCE_ENGINE_VPU_TYPE_*} macros. 1351 * @return automatically generated 1352 */ 1353 public static String getInferenceEngineVPUType() { 1354 return getInferenceEngineVPUType_0(); 1355 } 1356 1357 1358 // 1359 // C++: String cv::dnn::getInferenceEngineCPUType() 1360 // 1361 1362 /** 1363 * Returns Inference Engine CPU type. 1364 * 1365 * Specify OpenVINO plugin: CPU or ARM. 1366 * @return automatically generated 1367 */ 1368 public static String getInferenceEngineCPUType() { 1369 return getInferenceEngineCPUType_0(); 1370 } 1371 1372 1373 // 1374 // C++: void cv::dnn::releaseHDDLPlugin() 1375 // 1376 1377 /** 1378 * Release a HDDL plugin. 1379 */ 1380 public static void releaseHDDLPlugin() { 1381 releaseHDDLPlugin_0(); 1382 } 1383 1384 1385 1386 1387 // C++: vector_Target cv::dnn::getAvailableTargets(dnn_Backend be) 1388 private static native List<Integer> getAvailableTargets_0(int be); 1389 1390 // C++: Net cv::dnn::readNetFromDarknet(String cfgFile, String darknetModel = String()) 1391 private static native long readNetFromDarknet_0(String cfgFile, String darknetModel); 1392 private static native long readNetFromDarknet_1(String cfgFile); 1393 1394 // C++: Net cv::dnn::readNetFromDarknet(vector_uchar bufferCfg, vector_uchar bufferModel = std::vector<uchar>()) 1395 private static native long readNetFromDarknet_2(long bufferCfg_mat_nativeObj, long bufferModel_mat_nativeObj); 1396 private static native long readNetFromDarknet_3(long bufferCfg_mat_nativeObj); 1397 1398 // C++: Net cv::dnn::readNetFromCaffe(String prototxt, String caffeModel = String()) 1399 private static native long readNetFromCaffe_0(String prototxt, String caffeModel); 1400 private static native long readNetFromCaffe_1(String prototxt); 1401 1402 // C++: Net cv::dnn::readNetFromCaffe(vector_uchar bufferProto, vector_uchar bufferModel = std::vector<uchar>()) 1403 private static native long readNetFromCaffe_2(long bufferProto_mat_nativeObj, long bufferModel_mat_nativeObj); 1404 private static native long readNetFromCaffe_3(long bufferProto_mat_nativeObj); 1405 1406 // C++: Net cv::dnn::readNetFromTensorflow(String model, String config = String()) 1407 private static native long readNetFromTensorflow_0(String model, String config); 1408 private static native long readNetFromTensorflow_1(String model); 1409 1410 // C++: Net cv::dnn::readNetFromTensorflow(vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>()) 1411 private static native long readNetFromTensorflow_2(long bufferModel_mat_nativeObj, long bufferConfig_mat_nativeObj); 1412 private static native long readNetFromTensorflow_3(long bufferModel_mat_nativeObj); 1413 1414 // C++: Net cv::dnn::readNetFromTFLite(String model) 1415 private static native long readNetFromTFLite_0(String model); 1416 1417 // C++: Net cv::dnn::readNetFromTFLite(vector_uchar bufferModel) 1418 private static native long readNetFromTFLite_1(long bufferModel_mat_nativeObj); 1419 1420 // C++: Net cv::dnn::readNetFromTorch(String model, bool isBinary = true, bool evaluate = true) 1421 private static native long readNetFromTorch_0(String model, boolean isBinary, boolean evaluate); 1422 private static native long readNetFromTorch_1(String model, boolean isBinary); 1423 private static native long readNetFromTorch_2(String model); 1424 1425 // C++: Net cv::dnn::readNet(String model, String config = "", String framework = "") 1426 private static native long readNet_0(String model, String config, String framework); 1427 private static native long readNet_1(String model, String config); 1428 private static native long readNet_2(String model); 1429 1430 // C++: Net cv::dnn::readNet(String framework, vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>()) 1431 private static native long readNet_3(String framework, long bufferModel_mat_nativeObj, long bufferConfig_mat_nativeObj); 1432 private static native long readNet_4(String framework, long bufferModel_mat_nativeObj); 1433 1434 // C++: Mat cv::dnn::readTorchBlob(String filename, bool isBinary = true) 1435 private static native long readTorchBlob_0(String filename, boolean isBinary); 1436 private static native long readTorchBlob_1(String filename); 1437 1438 // C++: Net cv::dnn::readNetFromModelOptimizer(String xml, String bin) 1439 private static native long readNetFromModelOptimizer_0(String xml, String bin); 1440 1441 // C++: Net cv::dnn::readNetFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights) 1442 private static native long readNetFromModelOptimizer_1(long bufferModelConfig_mat_nativeObj, long bufferWeights_mat_nativeObj); 1443 1444 // C++: Net cv::dnn::readNetFromONNX(String onnxFile) 1445 private static native long readNetFromONNX_0(String onnxFile); 1446 1447 // C++: Net cv::dnn::readNetFromONNX(vector_uchar buffer) 1448 private static native long readNetFromONNX_1(long buffer_mat_nativeObj); 1449 1450 // C++: Mat cv::dnn::readTensorFromONNX(String path) 1451 private static native long readTensorFromONNX_0(String path); 1452 1453 // C++: Mat cv::dnn::blobFromImage(Mat image, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F) 1454 private static native long blobFromImage_0(long image_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3, boolean swapRB, boolean crop, int ddepth); 1455 private static native long blobFromImage_1(long image_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3, boolean swapRB, boolean crop); 1456 private static native long blobFromImage_2(long image_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3, boolean swapRB); 1457 private static native long blobFromImage_3(long image_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3); 1458 private static native long blobFromImage_4(long image_nativeObj, double scalefactor, double size_width, double size_height); 1459 private static native long blobFromImage_5(long image_nativeObj, double scalefactor); 1460 private static native long blobFromImage_6(long image_nativeObj); 1461 1462 // C++: Mat cv::dnn::blobFromImages(vector_Mat images, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F) 1463 private static native long blobFromImages_0(long images_mat_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3, boolean swapRB, boolean crop, int ddepth); 1464 private static native long blobFromImages_1(long images_mat_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3, boolean swapRB, boolean crop); 1465 private static native long blobFromImages_2(long images_mat_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3, boolean swapRB); 1466 private static native long blobFromImages_3(long images_mat_nativeObj, double scalefactor, double size_width, double size_height, double mean_val0, double mean_val1, double mean_val2, double mean_val3); 1467 private static native long blobFromImages_4(long images_mat_nativeObj, double scalefactor, double size_width, double size_height); 1468 private static native long blobFromImages_5(long images_mat_nativeObj, double scalefactor); 1469 private static native long blobFromImages_6(long images_mat_nativeObj); 1470 1471 // C++: Mat cv::dnn::blobFromImageWithParams(Mat image, Image2BlobParams param = Image2BlobParams()) 1472 private static native long blobFromImageWithParams_0(long image_nativeObj, long param_nativeObj); 1473 private static native long blobFromImageWithParams_1(long image_nativeObj); 1474 1475 // C++: void cv::dnn::blobFromImageWithParams(Mat image, Mat& blob, Image2BlobParams param = Image2BlobParams()) 1476 private static native void blobFromImageWithParams_2(long image_nativeObj, long blob_nativeObj, long param_nativeObj); 1477 private static native void blobFromImageWithParams_3(long image_nativeObj, long blob_nativeObj); 1478 1479 // C++: Mat cv::dnn::blobFromImagesWithParams(vector_Mat images, Image2BlobParams param = Image2BlobParams()) 1480 private static native long blobFromImagesWithParams_0(long images_mat_nativeObj, long param_nativeObj); 1481 private static native long blobFromImagesWithParams_1(long images_mat_nativeObj); 1482 1483 // C++: void cv::dnn::blobFromImagesWithParams(vector_Mat images, Mat& blob, Image2BlobParams param = Image2BlobParams()) 1484 private static native void blobFromImagesWithParams_2(long images_mat_nativeObj, long blob_nativeObj, long param_nativeObj); 1485 private static native void blobFromImagesWithParams_3(long images_mat_nativeObj, long blob_nativeObj); 1486 1487 // C++: void cv::dnn::imagesFromBlob(Mat blob_, vector_Mat& images_) 1488 private static native void imagesFromBlob_0(long blob__nativeObj, long images__mat_nativeObj); 1489 1490 // C++: void cv::dnn::shrinkCaffeModel(String src, String dst, vector_String layersTypes = std::vector<String>()) 1491 private static native void shrinkCaffeModel_0(String src, String dst, List<String> layersTypes); 1492 private static native void shrinkCaffeModel_1(String src, String dst); 1493 1494 // C++: void cv::dnn::writeTextGraph(String model, String output) 1495 private static native void writeTextGraph_0(String model, String output); 1496 1497 // C++: void cv::dnn::NMSBoxes(vector_Rect2d bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0) 1498 private static native void NMSBoxes_0(long bboxes_mat_nativeObj, long scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, float eta, int top_k); 1499 private static native void NMSBoxes_1(long bboxes_mat_nativeObj, long scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, float eta); 1500 private static native void NMSBoxes_2(long bboxes_mat_nativeObj, long scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj); 1501 1502 // C++: void cv::dnn::NMSBoxes(vector_RotatedRect bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0) 1503 private static native void NMSBoxesRotated_0(long bboxes_mat_nativeObj, long scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, float eta, int top_k); 1504 private static native void NMSBoxesRotated_1(long bboxes_mat_nativeObj, long scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, float eta); 1505 private static native void NMSBoxesRotated_2(long bboxes_mat_nativeObj, long scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj); 1506 1507 // C++: void cv::dnn::NMSBoxesBatched(vector_Rect2d bboxes, vector_float scores, vector_int class_ids, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0) 1508 private static native void NMSBoxesBatched_0(long bboxes_mat_nativeObj, long scores_mat_nativeObj, long class_ids_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, float eta, int top_k); 1509 private static native void NMSBoxesBatched_1(long bboxes_mat_nativeObj, long scores_mat_nativeObj, long class_ids_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, float eta); 1510 private static native void NMSBoxesBatched_2(long bboxes_mat_nativeObj, long scores_mat_nativeObj, long class_ids_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj); 1511 1512 // C++: void cv::dnn::softNMSBoxes(vector_Rect bboxes, vector_float scores, vector_float& updated_scores, float score_threshold, float nms_threshold, vector_int& indices, size_t top_k = 0, float sigma = 0.5, SoftNMSMethod method = SoftNMSMethod::SOFTNMS_GAUSSIAN) 1513 private static native void softNMSBoxes_0(long bboxes_mat_nativeObj, long scores_mat_nativeObj, long updated_scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, long top_k, float sigma); 1514 private static native void softNMSBoxes_2(long bboxes_mat_nativeObj, long scores_mat_nativeObj, long updated_scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj, long top_k); 1515 private static native void softNMSBoxes_3(long bboxes_mat_nativeObj, long scores_mat_nativeObj, long updated_scores_mat_nativeObj, float score_threshold, float nms_threshold, long indices_mat_nativeObj); 1516 1517 // C++: String cv::dnn::getInferenceEngineBackendType() 1518 private static native String getInferenceEngineBackendType_0(); 1519 1520 // C++: String cv::dnn::setInferenceEngineBackendType(String newBackendType) 1521 private static native String setInferenceEngineBackendType_0(String newBackendType); 1522 1523 // C++: void cv::dnn::resetMyriadDevice() 1524 private static native void resetMyriadDevice_0(); 1525 1526 // C++: String cv::dnn::getInferenceEngineVPUType() 1527 private static native String getInferenceEngineVPUType_0(); 1528 1529 // C++: String cv::dnn::getInferenceEngineCPUType() 1530 private static native String getInferenceEngineCPUType_0(); 1531 1532 // C++: void cv::dnn::releaseHDDLPlugin() 1533 private static native void releaseHDDLPlugin_0(); 1534 1535}