update gitignore

commit 4882dc1a67 (parent bc9f2824ed)
2026-01-20 20:00:16 +08:00
358 changed files with 1 addition and 161239 deletions


@@ -1,70 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP
#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP
#include <opencv2/gapi/util/any.hpp>
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer/ie.hpp> // Params
#include <string>
namespace cv {
namespace gapi {
namespace ie {
// NB: Used by python wrapper
// This class can be marked as SIMPLE, because it's implemented as pimpl
class GAPI_EXPORTS_W_SIMPLE PyParams {
public:
GAPI_WRAP
PyParams() = default;
GAPI_WRAP
PyParams(const std::string &tag,
const std::string &model,
const std::string &weights,
const std::string &device);
GAPI_WRAP
PyParams(const std::string &tag,
const std::string &model,
const std::string &device);
GAPI_WRAP
PyParams& constInput(const std::string &layer_name,
const cv::Mat &data,
TraitAs hint = TraitAs::TENSOR);
GAPI_WRAP
PyParams& cfgNumRequests(size_t nireq);
GAPI_WRAP
PyParams& cfgBatchSize(const size_t size);
GBackend backend() const;
std::string tag() const;
cv::util::any params() const;
private:
std::shared_ptr<Params<cv::gapi::Generic>> m_priv;
};
GAPI_EXPORTS_W PyParams params(const std::string &tag,
const std::string &model,
const std::string &weights,
const std::string &device);
GAPI_EXPORTS_W PyParams params(const std::string &tag,
const std::string &model,
const std::string &device);
} // namespace ie
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP
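
For reference, a minimal sketch of driving these binding-level factories directly from C++ (the tag and file names below are hypothetical; the chain simply mirrors the declarations above):

#include <opencv2/gapi/infer/bindings_ie.hpp>

cv::gapi::ie::PyParams p = cv::gapi::ie::params(
        "custom-net",              // network tag used with generic infer
        "model.xml", "model.bin",  // IR topology and weights
        "CPU")                     // target device
    .cfgNumRequests(4)
    .cfgBatchSize(1);
// p.backend(), p.tag() and p.params() feed G-API's network package machinery.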


@@ -1,74 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level
// directory of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_GAPI_INFER_BINDINGS_ONNX_HPP
#define OPENCV_GAPI_INFER_BINDINGS_ONNX_HPP
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer/onnx.hpp> // Params
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
#include <opencv2/gapi/util/any.hpp>
#include <string>
namespace cv {
namespace gapi {
namespace onnx {
// NB: Used by python wrapper
// This class can be marked as SIMPLE, because it's implemented as pimpl
class GAPI_EXPORTS_W_SIMPLE PyParams {
public:
GAPI_WRAP
PyParams() = default;
GAPI_WRAP
PyParams(const std::string& tag, const std::string& model_path);
GAPI_WRAP
PyParams& cfgMeanStd(const std::string &layer_name,
const cv::Scalar &m,
const cv::Scalar &s);
GAPI_WRAP
PyParams& cfgNormalize(const std::string &layer_name, bool flag);
GAPI_WRAP
PyParams& cfgAddExecutionProvider(ep::OpenVINO ep);
GAPI_WRAP
PyParams& cfgAddExecutionProvider(ep::DirectML ep);
GAPI_WRAP
PyParams& cfgAddExecutionProvider(ep::CoreML ep);
GAPI_WRAP
PyParams& cfgAddExecutionProvider(ep::CUDA ep);
GAPI_WRAP
PyParams& cfgAddExecutionProvider(ep::TensorRT ep);
GAPI_WRAP
PyParams& cfgDisableMemPattern();
GAPI_WRAP
PyParams& cfgSessionOptions(const std::map<std::string, std::string>& options);
GAPI_WRAP
PyParams& cfgOptLevel(const int opt_level);
GBackend backend() const;
std::string tag() const;
cv::util::any params() const;
private:
std::shared_ptr<Params<cv::gapi::Generic>> m_priv;
};
GAPI_EXPORTS_W PyParams params(const std::string& tag, const std::string& model_path);
} // namespace onnx
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_BINDINGS_ONNX_HPP
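
Likewise, a minimal hedged sketch for the ONNX binding chain (tag, model path, and layer name are hypothetical):

#include <opencv2/gapi/infer/bindings_onnx.hpp>

cv::gapi::onnx::PyParams p = cv::gapi::onnx::params("onnx-net", "model.onnx")
    .cfgNormalize("input", false)  // keep the raw input range for this layer
    .cfgAddExecutionProvider(cv::gapi::onnx::ep::OpenVINO("CPU"))
    .cfgOptLevel(2);               // extended graph optimizations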


@@ -1,128 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2023 Intel Corporation
#ifndef OPENCV_GAPI_INFER_BINDINGS_OV_HPP
#define OPENCV_GAPI_INFER_BINDINGS_OV_HPP
#include <opencv2/gapi/util/any.hpp>
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer/ov.hpp> // Params
#include <string>
namespace cv {
namespace gapi {
namespace ov {
// NB: Used by python wrapper
// This class can be marked as SIMPLE, because it's implemented as pimpl
class GAPI_EXPORTS_W_SIMPLE PyParams {
public:
GAPI_WRAP
PyParams() = default;
GAPI_WRAP
PyParams(const std::string &tag,
const std::string &model_path,
const std::string &bin_path,
const std::string &device);
GAPI_WRAP
PyParams(const std::string &tag,
const std::string &blob_path,
const std::string &device);
GAPI_WRAP
PyParams& cfgPluginConfig(
const std::map<std::string, std::string> &config);
GAPI_WRAP
PyParams& cfgInputTensorLayout(std::string tensor_layout);
GAPI_WRAP
PyParams& cfgInputTensorLayout(
std::map<std::string, std::string> layout_map);
GAPI_WRAP
PyParams& cfgInputModelLayout(std::string tensor_layout);
GAPI_WRAP
PyParams& cfgInputModelLayout(
std::map<std::string, std::string> layout_map);
GAPI_WRAP
PyParams& cfgOutputTensorLayout(std::string tensor_layout);
GAPI_WRAP
PyParams& cfgOutputTensorLayout(
std::map<std::string, std::string> layout_map);
GAPI_WRAP
PyParams& cfgOutputModelLayout(std::string tensor_layout);
GAPI_WRAP
PyParams& cfgOutputModelLayout(
std::map<std::string, std::string> layout_map);
GAPI_WRAP
PyParams& cfgOutputTensorPrecision(int precision);
GAPI_WRAP
PyParams& cfgOutputTensorPrecision(
std::map<std::string, int> precision_map);
GAPI_WRAP
PyParams& cfgReshape(std::vector<size_t> new_shape);
GAPI_WRAP
PyParams& cfgReshape(
std::map<std::string, std::vector<size_t>> new_shape_map);
GAPI_WRAP
PyParams& cfgNumRequests(const size_t nireq);
GAPI_WRAP
PyParams& cfgMean(std::vector<float> mean_values);
GAPI_WRAP
PyParams& cfgMean(
std::map<std::string, std::vector<float>> mean_map);
GAPI_WRAP
PyParams& cfgScale(std::vector<float> scale_values);
GAPI_WRAP
PyParams& cfgScale(
std::map<std::string, std::vector<float>> scale_map);
GAPI_WRAP
PyParams& cfgResize(int interpolation);
GAPI_WRAP
PyParams& cfgResize(std::map<std::string, int> interpolation);
GBackend backend() const;
std::string tag() const;
cv::util::any params() const;
private:
std::shared_ptr<Params<cv::gapi::Generic>> m_priv;
};
GAPI_EXPORTS_W PyParams params(const std::string &tag,
const std::string &model_path,
const std::string &weights,
const std::string &device);
GAPI_EXPORTS_W PyParams params(const std::string &tag,
const std::string &bin_path,
const std::string &device);
} // namespace ov
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_BINDINGS_OV_HPP
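
And a matching sketch for the OV bindings (tag and paths are hypothetical):

#include <opencv2/gapi/infer/bindings_ov.hpp>

cv::gapi::ov::PyParams p = cv::gapi::ov::params(
        "ov-net", "model.xml", "model.bin", "CPU")
    .cfgInputTensorLayout("NHWC")
    .cfgOutputTensorPrecision(CV_32F)  // OpenCV depth constant
    .cfgNumRequests(2);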


@@ -1,711 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019-2023 Intel Corporation
#ifndef OPENCV_GAPI_INFER_IE_HPP
#define OPENCV_GAPI_INFER_IE_HPP
#include <unordered_map>
#include <unordered_set>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/core/cvdef.h> // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp> // Generic
#include <opencv2/gapi/streaming/onevpl/accel_types.hpp> // Preproc Dev & Ctx
namespace cv {
namespace gapi {
// FIXME: introduce a new sub-namespace for NN?
/**
* @brief This namespace contains G-API OpenVINO backend functions,
* structures, and symbols.
*/
namespace ie {
GAPI_EXPORTS cv::gapi::GBackend backend();
/**
 * Specifies how G-API and IE should treat input data
*
* In OpenCV, the same cv::Mat is used to represent both
* image and tensor data. Sometimes those are hardly distinguishable,
* so this extra parameter is used to give G-API a hint.
*
* This hint controls how G-API reinterprets the data when converting
* it to IE Blob format (and which layout/etc is assigned to this data).
*/
enum class TraitAs: int
{
TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor and passes dimensions as-is
IMAGE //!< G-API treats an associated cv::Mat as an image and creates an "image" blob (NCHW/NHWC, etc)
};
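// A short hypothetical example of how the hint changes interpretation
// (constInput is declared below; layer names are made up):
//
//   cv::Mat prior(1, 128, CV_32F);          // tensor-like data, dims passed as-is
//   cv::Mat bgr = cv::imread("frame.png");  // HxWx3 image
//   net_params.constInput("priors", prior, TraitAs::TENSOR)
//             .constInput("image",  bgr,   TraitAs::IMAGE);  // wrapped as an "image" blob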
using IEConfig = std::map<std::string, std::string>;
enum InferMode {Sync, Async};
namespace detail {
template <typename T>
using AttrMap = std::map<std::string, T>;
// NB: This type is used to hold in/out layers
// attributes such as precision, layout, shape etc.
//
// User can provide attributes either:
// 1. cv::util::monostate - No value specified explicitly.
// 2. Attr - value specified explicitly that should be broadcasted to all layers.
// 3. AttrMap[str->T] - map specifies value for particular layer.
template <typename Attr>
using LayerVariantAttr = cv::util::variant< cv::util::monostate
, AttrMap<Attr>
, Attr>;
struct ParamDesc {
std::string model_path;
std::string weights_path;
std::string device_id;
std::vector<std::string> input_names;
std::vector<std::string> output_names;
using ConstInput = std::pair<cv::Mat, TraitAs>;
std::unordered_map<std::string, ConstInput> const_inputs;
std::size_t num_in;
std::size_t num_out;
enum class Kind {Load, Import};
Kind kind;
bool is_generic;
IEConfig config;
std::map<std::string, std::vector<std::size_t>> reshape_table;
std::unordered_set<std::string> layer_names_to_reshape;
// NB: Number of asynchronous infer requests
size_t nireq;
// NB: An optional config to setup RemoteContext for IE
cv::util::any context_config;
// NB: batch_size can't default to 1, because some models
// have a 2D (Layout::NC) input; if its first dimension is not equal to 1,
// net.setBatchSize(1) would overwrite it.
cv::optional<size_t> batch_size;
cv::optional<cv::gapi::wip::onevpl::Device> vpl_preproc_device;
cv::optional<cv::gapi::wip::onevpl::Context> vpl_preproc_ctx;
InferMode mode;
using PrecisionT = int;
using PrecisionMapT = std::unordered_map<std::string, PrecisionT>;
// NB: This parameter can contain:
// 1. cv::util::monostate - Don't specify precision, but use default from IR/Blob.
// 2. PrecisionT (CV_8U, CV_32F, ...) - Specifies precision for all output layers.
// 3. PrecisionMapT ({{"layer0", CV_32F}, {"layer1", CV_16F}}) - Specifies precision for particular output layers.
// cv::util::monostate is default value that means precision wasn't specified.
using PrecisionVariantT = cv::util::variant<cv::util::monostate,
PrecisionT,
PrecisionMapT>;
PrecisionVariantT output_precision;
LayerVariantAttr<std::string> input_layout;
LayerVariantAttr<std::string> output_layout;
LayerVariantAttr<int> interpolation;
};
} // namespace detail
// FIXME: this is probably a shared (reusable) thing
template<typename Net>
struct PortCfg {
using In = std::array
< std::string
, std::tuple_size<typename Net::InArgs>::value >;
using Out = std::array
< std::string
, std::tuple_size<typename Net::OutArgs>::value >;
};
/**
* @brief This structure provides functions
* that fill inference parameters for "OpenVINO Toolkit" model.
*/
template<typename Net> class Params {
public:
/** @brief Class constructor.
Constructs Params based on model information and specifies default values for other
inference description parameters. Model is loaded and compiled using "OpenVINO Toolkit".
@param model Path to topology IR (.xml file).
@param weights Path to weights (.bin file).
@param device target device to use.
*/
Params(const std::string &model,
const std::string &weights,
const std::string &device)
: desc{ model, weights, device, {}, {}, {}
, std::tuple_size<typename Net::InArgs>::value // num_in
, std::tuple_size<typename Net::OutArgs>::value // num_out
, detail::ParamDesc::Kind::Load
, false
, {}
, {}
, {}
, 1u
, {}
, {}
, {}
, {}
, InferMode::Async
, {}
, {}
, {}
, {} } {
}
/** @overload
Use this constructor to work with pre-compiled network.
Model is imported from a pre-compiled blob.
@param model Path to model.
@param device target device to use.
*/
Params(const std::string &model,
const std::string &device)
: desc{ model, {}, device, {}, {}, {}
, std::tuple_size<typename Net::InArgs>::value // num_in
, std::tuple_size<typename Net::OutArgs>::value // num_out
, detail::ParamDesc::Kind::Import
, false
, {}
, {}
, {}
, 1u
, {}
, {}
, {}
, {}
, InferMode::Async
, {}
, {}
, {}
, {} } {
}
/** @brief Specifies the sequence of network input layer names for inference.
The function is used to associate cv::gapi::infer<> inputs with the model inputs.
The number of names has to match the number of network inputs as defined in G_API_NET().
In case a network has only a single input layer, there is no need to specify the name manually.
@param layer_names std::array<std::string, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains names of input layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
desc.input_names.clear();
desc.input_names.reserve(layer_names.size());
std::copy(layer_names.begin(), layer_names.end(),
std::back_inserter(desc.input_names));
return *this;
}
/** @brief Specifies the sequence of network output layer names for inference.
The function is used to associate cv::gapi::infer<> outputs with the model outputs.
The number of names has to match the number of network outputs as defined in G_API_NET().
In case a network has only a single output layer, there is no need to specify the name manually.
@param layer_names std::array<std::string, N> where N is the number of outputs
as defined in the @ref G_API_NET. Contains names of output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
desc.output_names.clear();
desc.output_names.reserve(layer_names.size());
std::copy(layer_names.begin(), layer_names.end(),
std::back_inserter(desc.output_names));
return *this;
}
/** @brief Specifies a constant input.
The function is used to set a constant input. This input has to be
a preprocessed tensor if its type is TENSOR. You need to provide the name of the
network layer which will receive the data.
@param layer_name Name of network layer.
@param data cv::Mat that contains data which will be associated with network layer.
@param hint Input type @sa cv::gapi::ie::TraitAs.
@return reference to this parameter structure.
*/
Params<Net>& constInput(const std::string &layer_name,
const cv::Mat &data,
TraitAs hint = TraitAs::TENSOR) {
desc.const_inputs[layer_name] = {data, hint};
return *this;
}
/** @brief Specifies OpenVINO plugin configuration.
The function is used to set the configuration for an OpenVINO plugin. Some parameters
can be different for each plugin. Please follow https://docs.openvinotoolkit.org/latest/index.html
to check information about a specific plugin.
@param cfg Map of pairs: (config parameter name, config parameter value).
@return reference to this parameter structure.
*/
Params& pluginConfig(const IEConfig& cfg) {
desc.config = cfg;
return *this;
}
/** @overload
Function with a rvalue parameter.
@param cfg rvalue map of pairs: (config parameter name, config parameter value).
@return reference to this parameter structure.
*/
Params& pluginConfig(IEConfig&& cfg) {
desc.config = std::move(cfg);
return *this;
}
/** @brief Specifies configuration for RemoteContext in InferenceEngine.
When RemoteContext is configured the backend imports the networks using the context.
It also expects cv::MediaFrames to be actually remote, to operate with blobs via the context.
@param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
@return reference to this parameter structure.
*/
Params& cfgContextParams(const cv::util::any& ctx_cfg) {
desc.context_config = ctx_cfg;
return *this;
}
/** @overload
Function with an rvalue parameter.
@param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
@return reference to this parameter structure.
*/
Params& cfgContextParams(cv::util::any&& ctx_cfg) {
desc.context_config = std::move(ctx_cfg);
return *this;
}
/** @brief Specifies number of asynchronous inference requests.
@param nireq Number of inference asynchronous requests.
@return reference to this parameter structure.
*/
Params& cfgNumRequests(size_t nireq) {
GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
desc.nireq = nireq;
return *this;
}
/** @brief Specifies new input shapes for the network inputs.
The function is used to specify new input shapes for the network inputs.
Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1CNNNetwork.html
for additional information.
@param reshape_table Map of pairs: name of corresponding data and its dimension.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
desc.reshape_table = reshape_table;
return *this;
}
/** @overload */
Params<Net>& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
desc.reshape_table = std::move(reshape_table);
return *this;
}
/** @overload
@param layer_name Name of layer.
@param layer_dims New dimensions for this layer.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
return *this;
}
/** @overload */
Params<Net>& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
return *this;
}
/** @overload
@param layer_names set of names of network layers that will be used for network reshape.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
desc.layer_names_to_reshape = layer_names;
return *this;
}
/** @overload
@param layer_names rvalue set of names of layers that will be reshaped automatically
based on the input image size.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
desc.layer_names_to_reshape = std::move(layer_names);
return *this;
}
/** @brief Specifies the inference batch size.
The function is used to specify inference batch size.
Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1CNNNetwork.html#a8e9d19270a48aab50cb5b1c43eecb8e9 for additional information.
@param size batch size which will be used.
@return reference to this parameter structure.
*/
Params<Net>& cfgBatchSize(const size_t size) {
desc.batch_size = cv::util::make_optional(size);
return *this;
}
Params<Net>& cfgPreprocessingParams(const cv::gapi::wip::onevpl::Device &device,
const cv::gapi::wip::onevpl::Context &ctx) {
desc.vpl_preproc_device = cv::util::make_optional(device);
desc.vpl_preproc_ctx = cv::util::make_optional(ctx);
return *this;
}
/** @brief Specifies which API will be used to run inference.
The function is used to specify mode for OpenVINO inference.
OpenVINO has two options to run inference:
1. Asynchronous (using StartAsync: https://docs.openvino.ai/latest/classInferenceEngine_1_1InferRequest.html#doxid-class-inference-engine-1-1-infer-request-1a405293e8423d82a5b45f642a3bef0d24)
2. Synchronous (using Infer: https://docs.openvino.ai/latest/classInferenceEngine_1_1InferRequest.html#doxid-class-inference-engine-1-1-infer-request-1a3391ce30894abde730523e9ca9371ce8)
By default asynchronous mode is used.
@param mode Inference mode which will be used.
@return reference to this parameter structure.
*/
Params<Net>& cfgInferMode(InferMode mode) {
desc.mode = mode;
return *this;
}
/** @brief Specifies the output precision for model.
The function is used to set an output precision for model.
@param precision Precision in OpenCV format (CV_8U, CV_32F, ...)
will be applied to all output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputPrecision(detail::ParamDesc::PrecisionT precision) {
desc.output_precision = precision;
return *this;
}
/** @overload
@param precision_map Map of pairs: name of corresponding output layer
and its precision in OpenCV format (CV_8U, CV_32F, ...)
@return reference to this parameter structure.
*/
Params<Net>&
cfgOutputPrecision(detail::ParamDesc::PrecisionMapT precision_map) {
desc.output_precision = precision_map;
return *this;
}
/** @brief Specifies the input layout for model.
The function is used to set an input layout for model.
@param layout Layout in string representation ("NCHW", "NHWC", etc)
will be applied to all input layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputLayout(std::string layout) {
desc.input_layout = std::move(layout);
return *this;
}
/** @overload
@param layout_map Map of pairs: name of corresponding input layer
and its layout in string representation ("NCHW", "NHWC", etc)
@return reference to this parameter structure.
*/
Params<Net>&
cfgInputLayout(detail::AttrMap<std::string> layout_map) {
desc.input_layout = std::move(layout_map);
return *this;
}
/** @brief Specifies the output layout for model.
The function is used to set an output layout for model.
@param layout Layout in string representation ("NCHW", "NHWC", etc)
will be applied to all output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputLayout(std::string layout) {
desc.output_layout = std::move(layout);
return *this;
}
/** @overload
@param layout_map Map of pairs: name of corresponding output layer
and its layout in string representation ("NCHW", "NHWC", etc)
@return reference to this parameter structure.
*/
Params<Net>&
cfgOutputLayout(detail::AttrMap<std::string> layout_map) {
desc.output_layout = std::move(layout_map);
return *this;
}
/** @brief Specifies resize interpolation algorithm.
*
The function is used to configure resize preprocessing for input layer.
@param interpolation Resize interpolation algorithm.
Supported algorithms: #INTER_LINEAR, #INTER_AREA.
@return reference to this parameter structure.
*/
Params<Net>& cfgResize(int interpolation) {
desc.interpolation = interpolation;
return *this;
}
/** @overload
@param interpolation Map of pairs: name of corresponding input layer
and its resize algorithm.
@return reference to this parameter structure.
*/
Params<Net>& cfgResize(detail::AttrMap<int> interpolation) {
desc.interpolation = std::move(interpolation);
return *this;
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
};
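// A minimal end-to-end sketch of how the Params<Net> above is typically consumed;
// the network, file names and layer names here are hypothetical:
//
//   G_API_NET(AgeGender, <std::tuple<cv::GMat, cv::GMat>(cv::GMat)>, "age-gender");
//
//   auto net = cv::gapi::ie::Params<AgeGender>{
//           "age-gender.xml", "age-gender.bin", "CPU"}
//       .cfgOutputLayers({"age_conv3", "prob"})   // arity matches Net::OutArgs
//       .cfgNumRequests(4)
//       .cfgInferMode(InferMode::Sync);
//   // The result is passed to compilation via cv::compile_args(cv::gapi::networks(net)).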
/**
* @brief This structure provides functions that fill inference parameters
* for the generic network type.
* @see struct Generic
*/
template<>
class Params<cv::gapi::Generic> {
public:
/** @brief Class constructor.
Constructs Params based on model information and sets default values for other
inference description parameters. Model is loaded and compiled using OpenVINO Toolkit.
@param tag string tag of the network for which these parameters are intended.
@param model path to topology IR (.xml file).
@param weights path to weights (.bin file).
@param device target device to use.
*/
Params(const std::string &tag,
const std::string &model,
const std::string &weights,
const std::string &device)
: desc{ model, weights, device, {}, {}, {}, 0u, 0u,
detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
{}, {}, {}, {}, InferMode::Async, {}, {}, {}, {} },
m_tag(tag) {
}
/** @overload
Use this constructor for pre-compiled networks. The model is imported from a pre-compiled
blob.
@param tag string tag of the network for which these parameters are intended.
@param model path to model.
@param device target device to use.
*/
Params(const std::string &tag,
const std::string &model,
const std::string &device)
: desc{ model, {}, device, {}, {}, {}, 0u, 0u,
detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
{}, {}, {}, {}, InferMode::Async, {}, {}, {}, {} },
m_tag(tag) {
}
/** @see ie::Params::pluginConfig. */
Params& pluginConfig(const IEConfig& cfg) {
desc.config = cfg;
return *this;
}
/** @overload */
Params& pluginConfig(IEConfig&& cfg) {
desc.config = std::move(cfg);
return *this;
}
/** @see ie::Params::constInput. */
Params& constInput(const std::string &layer_name,
const cv::Mat &data,
TraitAs hint = TraitAs::TENSOR) {
desc.const_inputs[layer_name] = {data, hint};
return *this;
}
/** @see ie::Params::cfgNumRequests. */
Params& cfgNumRequests(size_t nireq) {
GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
desc.nireq = nireq;
return *this;
}
/** @see ie::Params::cfgInputReshape */
Params& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>&reshape_table) {
desc.reshape_table = reshape_table;
return *this;
}
/** @overload */
Params& cfgInputReshape(std::map<std::string, std::vector<std::size_t>> && reshape_table) {
desc.reshape_table = std::move(reshape_table);
return *this;
}
/** @overload */
Params& cfgInputReshape(std::string && layer_name, std::vector<size_t> && layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
return *this;
}
/** @overload */
Params& cfgInputReshape(const std::string & layer_name, const std::vector<size_t>&layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
return *this;
}
/** @overload */
Params& cfgInputReshape(std::unordered_set<std::string> && layer_names) {
desc.layer_names_to_reshape = std::move(layer_names);
return *this;
}
/** @overload */
Params& cfgInputReshape(const std::unordered_set<std::string>&layer_names) {
desc.layer_names_to_reshape = layer_names;
return *this;
}
/** @see ie::Params::cfgBatchSize */
Params& cfgBatchSize(const size_t size) {
desc.batch_size = cv::util::make_optional(size);
return *this;
}
/** @see ie::Params::cfgInferMode */
Params& cfgInferMode(InferMode mode) {
desc.mode = mode;
return *this;
}
/** @see ie::Params::cfgOutputPrecision */
Params& cfgOutputPrecision(detail::ParamDesc::PrecisionT precision) {
desc.output_precision = precision;
return *this;
}
/** @overload */
Params&
cfgOutputPrecision(detail::ParamDesc::PrecisionMapT precision_map) {
desc.output_precision = precision_map;
return *this;
}
/** @see ie::Params::cfgInputLayout */
Params& cfgInputLayout(std::string layout) {
desc.input_layout = std::move(layout);
return *this;
}
/** @overload */
Params&
cfgInputLayout(detail::AttrMap<std::string> layout_map) {
desc.input_layout = std::move(layout_map);
return *this;
}
/** @see ie::Params::cfgOutputLayout */
Params& cfgOutputLayout(std::string layout) {
desc.output_layout = std::move(layout);
return *this;
}
/** @overload */
Params&
cfgOutputLayout(detail::AttrMap<std::string> layout_map) {
desc.output_layout = std::move(layout_map);
return *this;
}
/** @see ie::Params::cfgResize */
Params& cfgResize(int interpolation) {
desc.interpolation = interpolation;
return *this;
}
/** @overload */
Params& cfgResize(detail::AttrMap<int> interpolation) {
desc.interpolation = std::move(interpolation);
return *this;
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return m_tag; }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
std::string m_tag;
};
} // namespace ie
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_INFER_IE_HPP
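
A short sketch of the generic (tag-based) flavor declared above; the tag and file names are hypothetical:

#include <opencv2/gapi/infer/ie.hpp>

// Generic inference: no G_API_NET needed, I/O is resolved by tag at runtime.
cv::gapi::ie::Params<cv::gapi::Generic> net{
    "generic-net", "model.xml", "model.bin", "CPU"};
net.cfgBatchSize(1)
   .cfgOutputPrecision(CV_32F);
// Used together with cv::gapi::infer<cv::gapi::Generic>("generic-net", inputs).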


@@ -1,768 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020-2021 Intel Corporation
#ifndef OPENCV_GAPI_INFER_ONNX_HPP
#define OPENCV_GAPI_INFER_ONNX_HPP
#include <unordered_map>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/util/optional.hpp>
#include <opencv2/core/cvdef.h> // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp> // Generic
#include <opencv2/gapi/infer/workload_type.hpp>
namespace cv {
namespace gapi {
/**
* @brief This namespace contains G-API ONNX Runtime backend functions, structures, and symbols.
*/
namespace onnx {
/**
* @brief This namespace contains Execution Providers structures for G-API ONNX Runtime backend.
*/
namespace ep {
/**
* @brief This structure provides functions
* that fill inference options for ONNX CoreML Execution Provider.
* Please follow https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml-execution-provider
*/
struct GAPI_EXPORTS_W_SIMPLE CoreML {
/** @brief Class constructor.
Constructs CoreML parameters.
*/
GAPI_WRAP
CoreML() = default;
/** @brief Limit CoreML Execution Provider to run on CPU only.
This function is used to limit CoreML to run on CPU only.
Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_use_cpu_only
@return reference to this parameter structure.
*/
GAPI_WRAP
CoreML& cfgUseCPUOnly() {
use_cpu_only = true;
return *this;
}
/** @brief Enable CoreML EP to run on a subgraph in the body of a control flow ONNX operator (i.e. a Loop, Scan or If operator).
This function is used to enable the CoreML EP to run on
a subgraph in the body of a control-flow ONNX operator.
Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_enable_on_subgraph
@return reference to this parameter structure.
*/
GAPI_WRAP
CoreML& cfgEnableOnSubgraph() {
enable_on_subgraph = true;
return *this;
}
/** @brief Enable CoreML EP to run only on Apple Neural Engine.
This function is used to enable CoreML EP to run only on Apple Neural Engine.
Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_only_enable_device_with_ane
@return reference to this parameter structure.
*/
GAPI_WRAP
CoreML& cfgEnableOnlyNeuralEngine() {
enable_only_ane = true;
return *this;
}
bool use_cpu_only = false;
bool enable_on_subgraph = false;
bool enable_only_ane = false;
};
/**
* @brief This structure provides functions
* that fill inference options for CUDA Execution Provider.
* Please follow https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#cuda-execution-provider
*/
struct GAPI_EXPORTS_W_SIMPLE CUDA {
// NB: Used from python.
/// @private -- Exclude this constructor from OpenCV documentation
GAPI_WRAP
CUDA() = default;
/** @brief Class constructor.
Constructs CUDA parameters based on device type information.
@param dev_id Target device id to use.
*/
GAPI_WRAP
explicit CUDA(const int dev_id)
: device_id(dev_id) {
}
int device_id;
};
/**
* @brief This structure provides functions
* that fill inference options for TensorRT Execution Provider.
* Please follow https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#tensorrt-execution-provider
*/
struct GAPI_EXPORTS_W_SIMPLE TensorRT {
// NB: Used from python.
/// @private -- Exclude this constructor from OpenCV documentation
GAPI_WRAP
TensorRT() = default;
/** @brief Class constructor.
Constructs TensorRT parameters based on device type information.
@param dev_id Target device id to use.
*/
GAPI_WRAP
explicit TensorRT(const int dev_id)
: device_id(dev_id) {
}
int device_id;
};
/**
* @brief This structure provides functions
* that fill inference options for ONNX OpenVINO Execution Provider.
* Please follow https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html#summary-of-options
*/
struct GAPI_EXPORTS_W_SIMPLE OpenVINO {
// NB: Used from python.
/// @private -- Exclude this constructor from OpenCV documentation
GAPI_WRAP
OpenVINO() = default;
/** @brief Class constructor.
Constructs OpenVINO parameters based on device type information.
@param dev_type Target device type to use. ("CPU", "GPU", "GPU.0" etc)
*/
GAPI_WRAP
explicit OpenVINO(const std::string &dev_type)
: device_type(dev_type) {
}
/** @brief Class constructor.
Constructs OpenVINO parameters based on map of options passed.
* @param params A map of parameter names and their corresponding string values.
*/
GAPI_WRAP
explicit OpenVINO(const std::map<std::string, std::string>& params)
: params_map(params) {
}
/** @brief Specifies OpenVINO Execution Provider cache dir.
This function is used to explicitly specify the path to save and load
the blobs enabling model caching feature.
@param dir Path to the directory that will be used as the cache.
@return reference to this parameter structure.
*/
GAPI_WRAP
OpenVINO& cfgCacheDir(const std::string &dir) {
if (!params_map.empty()) {
cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
"created from the parameters map."));
}
cache_dir = dir;
return *this;
}
/** @brief Specifies OpenVINO Execution Provider number of threads.
This function is used to override the accelerator default value
of number of threads with this value at runtime.
@param nthreads Number of threads.
@return reference to this parameter structure.
*/
GAPI_WRAP
OpenVINO& cfgNumThreads(size_t nthreads) {
if (!params_map.empty()) {
cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
"created from the parameters map."));
}
num_of_threads = nthreads;
return *this;
}
/** @brief Enables OpenVINO Execution Provider OpenCL throttling.
This function is used to enable OpenCL queue throttling for GPU devices
(reduces CPU utilization when using GPU).
@return reference to this parameter structure.
*/
GAPI_WRAP
OpenVINO& cfgEnableOpenCLThrottling() {
if (!params_map.empty()) {
cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
"created from the parameters map."));
}
enable_opencl_throttling = true;
return *this;
}
/** @brief Enables OpenVINO Execution Provider dynamic shapes.
This function is used to enable work with dynamically shaped models,
whose shapes will be set dynamically based on the actual inference input
image/data shape at run time on CPU.
@return reference to this parameter structure.
*/
GAPI_WRAP
OpenVINO& cfgEnableDynamicShapes() {
if (!params_map.empty()) {
cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
"created from the parameters map."));
}
enable_dynamic_shapes = true;
return *this;
}
std::string device_type;
std::string cache_dir;
size_t num_of_threads = 0;
bool enable_opencl_throttling = false;
bool enable_dynamic_shapes = false;
std::map<std::string, std::string> params_map;
};
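// A hedged sketch of the two mutually exclusive configuration styles
// (the option keys in the map follow ONNX Runtime's OpenVINO EP docs and
// are an assumption here):
//
//   auto ov_ep = cv::gapi::onnx::ep::OpenVINO("GPU")
//                    .cfgCacheDir("/tmp/ov_cache")
//                    .cfgNumThreads(8);
//   // ...or pass a ready-made options map; mixing both styles throws
//   // std::logic_error (see the guards above):
//   auto ov_ep2 = cv::gapi::onnx::ep::OpenVINO(
//       std::map<std::string, std::string>{{"device_type", "GPU"},
//                                          {"cache_dir",   "/tmp/ov_cache"}});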
/**
* @brief This structure provides functions
* that fill inference options for ONNX DirectML Execution Provider.
* Please follow https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html#directml-execution-provider
*/
class GAPI_EXPORTS_W_SIMPLE DirectML {
public:
// NB: Used from python.
/// @private -- Exclude this constructor from OpenCV documentation
GAPI_WRAP
DirectML() = default;
/** @brief Class constructor.
Constructs DirectML parameters based on device id.
@param device_id Target device id to use. ("0", "1", etc)
*/
GAPI_WRAP
explicit DirectML(const int device_id) : ddesc(device_id) { };
/** @brief Class constructor.
Constructs DirectML parameters based on adapter name.
@param adapter_name Target adapter_name to use.
*/
GAPI_WRAP
explicit DirectML(const std::string &adapter_name) : ddesc(adapter_name) { };
using DeviceDesc = cv::util::variant<int, std::string>;
DeviceDesc ddesc;
};
using EP = cv::util::variant< cv::util::monostate
, OpenVINO
, DirectML
, CoreML
, CUDA
, TensorRT>;
} // namespace ep
GAPI_EXPORTS cv::gapi::GBackend backend();
enum class TraitAs: int {
TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor
// and passes dimensions as-is
IMAGE //!< G-API treats an associated cv::Mat as an image and
// creates an "image" blob (NCHW/NHWC, etc)
};
using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
std::unordered_map<std::string, cv::Mat> &)>;
namespace detail {
/**
* @brief This structure contains description of inference parameters
* which is specific to ONNX models.
*/
struct ParamDesc {
std::string model_path; //!< Path to model.
// NB: num_* may differ from topology's real input/output port numbers
// (e.g. topology's partial execution)
std::size_t num_in; //!< How many inputs are defined in the operation
std::size_t num_out; //!< How many outputs are defined in the operation
// NB: Here order follows the `Net` API
std::vector<std::string> input_names; //!< Names of input network layers.
std::vector<std::string> output_names; //!< Names of output network layers.
using ConstInput = std::pair<cv::Mat, TraitAs>;
std::unordered_map<std::string, ConstInput> const_inputs; //!< Map with pair of name of network layer and ConstInput which will be associated with this.
std::vector<cv::Scalar> mean; //!< Mean values for preprocessing.
std::vector<cv::Scalar> stdev; //!< Standard deviation values for preprocessing.
std::vector<cv::GMatDesc> out_metas; //!< Out meta information about your output (type, dimension).
PostProc custom_post_proc; //!< Post processing function.
std::vector<bool> normalize; //!< Vector of bool values that enable or disable normalization of input data.
std::vector<std::string> names_to_remap; //!< Names of output layers that will be processed in PostProc function.
bool is_generic;
// TODO: Need to modify the rest of ParamDesc accordingly to support
// both generic and non-generic options without duplication
// (as it was done for the OV IE backend)
// These values are pushed into the respective vector<> fields above
// when the generic infer parameters are unpacked (see GONNXBackendImpl::unpackKernel)
std::unordered_map<std::string, std::pair<cv::Scalar, cv::Scalar> > generic_mstd;
std::unordered_map<std::string, bool> generic_norm;
std::map<std::string, std::string> session_options;
std::vector<cv::gapi::onnx::ep::EP> execution_providers;
bool disable_mem_pattern;
cv::util::optional<int> opt_level;
};
} // namespace detail
template<typename Net>
struct PortCfg {
using In = std::array
< std::string
, std::tuple_size<typename Net::InArgs>::value >;
using Out = std::array
< std::string
, std::tuple_size<typename Net::OutArgs>::value >;
using NormCoefs = std::array
< cv::Scalar
, std::tuple_size<typename Net::InArgs>::value >;
using Normalize = std::array
< bool
, std::tuple_size<typename Net::InArgs>::value >;
};
/**
* Contains description of inference parameters and kit of functions that
* fill this parameters.
*/
template<typename Net> class Params {
public:
/** @brief Class constructor.
Constructs Params based on model information and sets default values for other
inference description parameters.
@param model Path to model (.onnx file).
*/
Params(const std::string &model) {
desc.model_path = model;
desc.num_in = std::tuple_size<typename Net::InArgs>::value;
desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
desc.is_generic = false;
desc.disable_mem_pattern = false;
}
/** @brief Specifies the sequence of network input layer names for inference.
The function is used to associate data of graph inputs with the input layers of
the network topology. The number of names has to match the number of network inputs.
If a network has only one input layer, there is no need to call it, as the layer is
associated with the input automatically; this doesn't prevent you from
doing it yourself.
@param layer_names std::array<std::string, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains names of input layers.
@return the reference on modified object.
*/
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
desc.input_names.assign(layer_names.begin(), layer_names.end());
return *this;
}
/** @brief Specifies the sequence of output layer names for inference.
The function is used to associate data of graph outputs with the output layers of
the network topology. If a network has only one output layer, there is no need to call it,
as the layer is associated with the output automatically; this doesn't prevent
you from doing it yourself. The number of names has to match the number of network
outputs, or you can set your own outputs, but in that case you additionally have to
use the @ref cfgPostProc function.
@param layer_names std::array<std::string, N> where N is the number of outputs
as defined in the @ref G_API_NET. Contains names of output layers.
@return the reference on modified object.
*/
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
desc.output_names.assign(layer_names.begin(), layer_names.end());
return *this;
}
/** @brief Sets a constant input.
The function is used to set a constant input. This input has to be
a prepared tensor, since preprocessing is disabled for this case. You should
provide the name of the network layer which will receive the data.
@param layer_name Name of network layer.
@param data cv::Mat that contains data which will be associated with network layer.
@param hint Type of input (TENSOR).
@return the reference on modified object.
*/
Params<Net>& constInput(const std::string &layer_name,
const cv::Mat &data,
TraitAs hint = TraitAs::TENSOR) {
desc.const_inputs[layer_name] = {data, hint};
return *this;
}
/** @brief Specifies mean value and standard deviation for preprocessing.
The function is used to set mean value and standard deviation for preprocessing
of input data.
@param m std::array<cv::Scalar, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains mean values.
@param s std::array<cv::Scalar, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains standard deviation values.
@return the reference on modified object.
*/
Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
const typename PortCfg<Net>::NormCoefs &s) {
desc.mean.assign(m.begin(), m.end());
desc.stdev.assign(s.begin(), s.end());
return *this;
}
/** @brief Configures graph output and provides the post processing function from user.
The function is used when you work with networks with dynamic outputs.
Since the dimensions of the inference result can't be known in advance, you need to
provide them for the construction of the graph output; these dimensions can differ
from the inference result. So you have to provide a @ref PostProc function that takes
information from the inference result and fills the output, which is constructed with
the dimensions from out_metas.
@param out_metas Out meta information about your output (type, dimension).
@param remap_function Post processing function with two parameters: the first is the ONNX
result, the second is the graph output. Both parameters are std::maps that contain pairs of
a layer's name and a cv::Mat.
@return the reference on modified object.
*/
Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
const PostProc &remap_function) {
desc.out_metas = out_metas;
desc.custom_post_proc = remap_function;
return *this;
}
/** @overload
Function with rvalue parameters.
@param out_metas rvalue out meta information about your output (type, dimension).
@param remap_function rvalue post processing function with two parameters: the first is the ONNX
result, the second is the graph output. Both parameters are std::maps that contain pairs of
a layer's name and a cv::Mat.
@return the reference on modified object.
*/
Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
PostProc &&remap_function) {
desc.out_metas = std::move(out_metas);
desc.custom_post_proc = std::move(remap_function);
return *this;
}
/** @overload
The function has additional parameter names_to_remap. This parameter provides
information about output layers which will be used for inference and post
processing function.
@param out_metas Out meta information.
@param remap_function Post processing function.
@param names_to_remap Names of output layers. The network's inference will
be done on these layers, and the inference result will be processed in the post processing
function using these names.
@return the reference on modified object.
*/
Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
const PostProc &remap_function,
const std::vector<std::string> &names_to_remap) {
desc.out_metas = out_metas;
desc.custom_post_proc = remap_function;
desc.names_to_remap = names_to_remap;
return *this;
}
/** @overload
Function with rvalue parameters and the additional parameter names_to_remap.
@param out_metas rvalue out meta information.
@param remap_function rvalue post processing function.
@param names_to_remap rvalue names of output layers. The network's inference will
be done on these layers, and the inference result will be processed in the post processing
function using these names.
@return the reference on modified object.
*/
Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
PostProc &&remap_function,
std::vector<std::string> &&names_to_remap) {
desc.out_metas = std::move(out_metas);
desc.custom_post_proc = std::move(remap_function);
desc.names_to_remap = std::move(names_to_remap);
return *this;
}
/** @brief Specifies the normalize parameter for preprocessing.
The function is used to enable or disable normalization of input data.
@param normalizations std::array<bool, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains bool values that enable or disable
normalization of input data.
@return the reference on modified object.
*/
Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &normalizations) {
desc.normalize.assign(normalizations.begin(), normalizations.end());
return *this;
}
/** @brief Adds execution provider for runtime.
The function is used to add ONNX Runtime OpenVINO Execution Provider options.
@param ep OpenVINO Execution Provider options.
@see cv::gapi::onnx::ep::OpenVINO.
@return the reference on modified object.
*/
Params<Net>& cfgAddExecutionProvider(ep::OpenVINO&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
return *this;
}
/** @brief Adds execution provider for runtime.
The function is used to add ONNX Runtime DirectML Execution Provider options.
@param ep DirectML Execution Provider options.
@see cv::gapi::onnx::ep::DirectML.
@return the reference on modified object.
*/
Params<Net>& cfgAddExecutionProvider(ep::DirectML&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
return *this;
}
/** @brief Adds execution provider for runtime.
The function is used to add ONNX Runtime CoreML Execution Provider options.
@param ep CoreML Execution Provider options.
@see cv::gapi::onnx::ep::CoreML.
@return the reference on modified object.
*/
Params<Net>& cfgAddExecutionProvider(ep::CoreML&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
return *this;
}
/** @brief Adds execution provider for runtime.
The function is used to add ONNX Runtime CUDA Execution Provider options.
@param ep CUDA Execution Provider options.
@see cv::gapi::onnx::ep::CUDA.
@return the reference on modified object.
*/
Params<Net>& cfgAddExecutionProvider(ep::CUDA&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
return *this;
}
/** @brief Adds execution provider for runtime.
The function is used to add ONNX Runtime TensorRT Execution Provider options.
@param ep TensorRT Execution Provider options.
@see cv::gapi::onnx::ep::TensorRT.
@return the reference on modified object.
*/
Params<Net>& cfgAddExecutionProvider(ep::TensorRT&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
return *this;
}
/** @brief Disables the memory pattern optimization.
@return the reference on modified object.
*/
Params<Net>& cfgDisableMemPattern() {
desc.disable_mem_pattern = true;
return *this;
}
/** @brief Configures session options for ONNX Runtime.
This function is used to set various session options for the ONNX Runtime
session by accepting a map of key-value pairs.
@param options A map of session option to be applied to the ONNX Runtime session.
@return the reference on modified object.
*/
Params<Net>& cfgSessionOptions(const std::map<std::string, std::string>& options) {
desc.session_options.insert(options.begin(), options.end());
return *this;
}
/** @brief Configures optimization level for ONNX Runtime.
@param opt_level [optimization level]: Valid values are 0 (disable), 1 (basic), 2 (extended), 99 (all).
Please see onnxruntime_c_api.h (enum GraphOptimizationLevel) for the full list of all optimization levels.
@return the reference on modified object.
*/
Params<Net>& cfgOptLevel(const int opt_level) {
desc.opt_level = cv::util::make_optional(opt_level);
return *this;
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::onnx::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
};
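// A minimal usage sketch for the template Params above (network, file and
// layer names are hypothetical):
//
//   G_API_NET(Classifier, <cv::GMat(cv::GMat)>, "onnx-classifier");
//
//   auto net = cv::gapi::onnx::Params<Classifier>{"classifier.onnx"}
//       .cfgMeanStd({cv::Scalar(0.485, 0.456, 0.406)},   // one input => arrays of size 1
//                   {cv::Scalar(0.229, 0.224, 0.225)})
//       .cfgAddExecutionProvider(cv::gapi::onnx::ep::CUDA(0))
//       .cfgDisableMemPattern();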
/**
* @brief This structure provides functions that fill inference parameters
* for the generic network type.
* @see struct Generic
*/
template<>
class Params<cv::gapi::Generic> {
public:
/** @brief Class constructor.
Constructs Params based on input information and sets default values for other
inference description parameters.
@param tag string tag of the network for which these parameters are intended.
@param model_path path to model file (.onnx file).
*/
Params(const std::string& tag, const std::string& model_path)
: desc{ model_path, 0u, 0u, {}, {}, {}, {}, {}, {}, {}, {}, {}, true, {}, {}, {}, {}, false, {} }, m_tag(tag) {}
/** @see onnx::Params::cfgMeanStd. */
void cfgMeanStdDev(const std::string &layer,
const cv::Scalar &m,
const cv::Scalar &s) {
desc.generic_mstd[layer] = std::make_pair(m, s);
}
/** @see onnx::Params::cfgNormalize. */
void cfgNormalize(const std::string &layer, bool flag) {
desc.generic_norm[layer] = flag;
}
/** @see onnx::Params::cfgAddExecutionProvider. */
void cfgAddExecutionProvider(ep::OpenVINO&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
}
/** @see onnx::Params::cfgAddExecutionProvider. */
void cfgAddExecutionProvider(ep::DirectML&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
}
/** @see onnx::Params::cfgAddExecutionProvider. */
void cfgAddExecutionProvider(ep::CoreML&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
}
/** @see onnx::Params::cfgAddExecutionProvider. */
void cfgAddExecutionProvider(ep::CUDA&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
}
/** @see onnx::Params::cfgAddExecutionProvider. */
void cfgAddExecutionProvider(ep::TensorRT&& ep) {
desc.execution_providers.emplace_back(std::move(ep));
}
/** @see onnx::Params::cfgDisableMemPattern. */
void cfgDisableMemPattern() {
desc.disable_mem_pattern = true;
}
/** @see onnx::Params::cfgSessionOptions. */
void cfgSessionOptions(const std::map<std::string, std::string>& options) {
desc.session_options.insert(options.begin(), options.end());
}
/** @see onnx::Params::cfgOptLevel. */
void cfgOptLevel(const int opt_level) {
desc.opt_level = cv::util::make_optional(opt_level);
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::onnx::backend(); }
std::string tag() const { return m_tag; }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
std::string m_tag;
};
class WorkloadTypeONNX : public WorkloadType {};
using WorkloadTypeONNXPtr = std::shared_ptr<cv::gapi::onnx::WorkloadTypeONNX>;
} // namespace onnx
} // namespace gapi
namespace detail {
template<> struct CompileArgTag<cv::gapi::onnx::WorkloadTypeONNXPtr> {
static const char* tag() { return "gapi.onnx.workload_type"; }
};
} // namespace detail
} // namespace cv
#endif // OPENCV_GAPI_INFER_ONNX_HPP
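
Unlike the chainable template Params, the generic ONNX Params setters above return void, so calls go in separate statements. A hedged sketch (tag, model path, and the session-option key are assumptions):

#include <opencv2/gapi/infer/onnx.hpp>

cv::gapi::onnx::Params<cv::gapi::Generic> net{"onnx-generic", "model.onnx"};
net.cfgMeanStdDev("data", cv::Scalar(0.5), cv::Scalar(0.5));
net.cfgSessionOptions({{"session.intra_op.allow_spinning", "0"}});
net.cfgOptLevel(1);  // basic graph optimizations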


@@ -1,771 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2023 Intel Corporation
#ifndef OPENCV_GAPI_INFER_OV_HPP
#define OPENCV_GAPI_INFER_OV_HPP
#include <string>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/own/exports.hpp> // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelType[M], GBackend
#include <opencv2/gapi/infer.hpp> // Generic
#include <opencv2/gapi/infer/workload_type.hpp>
#include <map>
namespace cv {
namespace gapi {
/**
* @brief This namespace contains G-API OpenVINO 2.0 backend functions,
* structures, and symbols.
*/
namespace ov {
GAPI_EXPORTS cv::gapi::GBackend backend();
namespace detail {
template <typename T>
using AttrMap = std::map<std::string, T>;
// NB: This type is supposed to be used to hold in/out layers
// attributes such as precision, layout, shape etc.
//
// User can provide attributes either:
// 1. cv::util::monostate - No value specified explicitly.
// 2. Attr - value specified explicitly that should be broadcasted to all layers.
// 3. AttrMap[str->T] - map specifies value for particular layer.
template <typename Attr>
using LayerVariantAttr = cv::util::variant< cv::util::monostate
, AttrMap<Attr>
, Attr>;
struct ParamDesc {
struct Model {
Model(const std::string &model_path_,
const std::string &bin_path_)
: model_path(model_path_), bin_path(bin_path_) {
}
std::string model_path;
std::string bin_path;
LayerVariantAttr<std::string> input_tensor_layout;
LayerVariantAttr<std::string> input_model_layout;
LayerVariantAttr<std::string> output_tensor_layout;
LayerVariantAttr<std::string> output_model_layout;
LayerVariantAttr<int> output_tensor_precision;
LayerVariantAttr<std::vector<size_t>> new_shapes;
LayerVariantAttr<std::vector<float>> mean_values;
LayerVariantAttr<std::vector<float>> scale_values;
LayerVariantAttr<int> interpolation;
bool clamp_outputs = false;
};
struct CompiledModel {
std::string blob_path;
};
using Kind = cv::util::variant<Model, CompiledModel>;
ParamDesc(Kind &&kind_,
const std::string &device_,
const bool is_generic_,
const size_t num_in_,
const size_t num_out_)
: kind(std::move(kind_)), device(device_),
is_generic(is_generic_),
num_in(num_in_), num_out(num_out_) {
}
Kind kind;
std::string device;
bool is_generic;
std::size_t num_in;
std::size_t num_out;
std::vector<std::string> input_names;
std::vector<std::string> output_names;
using PluginConfigT = std::map<std::string, std::string>;
PluginConfigT config;
size_t nireq = 1;
bool ensure_named_tensors = false;
};
// NB: Just helper to avoid code duplication.
static detail::ParamDesc::Model&
getModelToSetAttrOrThrow(detail::ParamDesc::Kind &kind,
const std::string &attr_name) {
if (cv::util::holds_alternative<detail::ParamDesc::CompiledModel>(kind)) {
cv::util::throw_error(
std::logic_error("Specifying " + attr_name + " isn't"
" possible for compiled model."));
}
GAPI_Assert(cv::util::holds_alternative<detail::ParamDesc::Model>(kind));
return cv::util::get<detail::ParamDesc::Model>(kind);
}
} // namespace detail
/**
* @brief This structure provides functions
* that fill inference parameters for "OpenVINO Toolkit" model.
*/
template<typename Net> struct Params {
public:
/** @brief Class constructor.
Constructs Params based on model information and specifies default values for other
inference description parameters. Model is loaded and compiled using "OpenVINO Toolkit".
@param model_path Path to a model.
@param bin_path Path to a data file.
For IR format (*.bin):
If the path is empty, G-API will try to read a .bin file with the same name as the .xml.
If a .bin file with the same name is not found, the IR will be loaded without weights.
For PDPD (*.pdmodel) and ONNX (*.onnx) formats, bin_path isn't used.
@param device target device to use.
*/
Params(const std::string &model_path,
const std::string &bin_path,
const std::string &device)
: m_desc( detail::ParamDesc::Kind{detail::ParamDesc::Model{model_path, bin_path}}
, device
, false /* is generic */
, std::tuple_size<typename Net::InArgs>::value
, std::tuple_size<typename Net::OutArgs>::value) {
}
/** @overload
Use this constructor to work with pre-compiled network.
Model is imported from a pre-compiled blob.
@param blob_path path to the compiled model (*.blob).
@param device target device to use.
*/
Params(const std::string &blob_path,
const std::string &device)
: m_desc( detail::ParamDesc::Kind{detail::ParamDesc::CompiledModel{blob_path}}
, device
, false /* is generic */
, std::tuple_size<typename Net::InArgs>::value
, std::tuple_size<typename Net::OutArgs>::value) {
}
/** @brief Specifies the sequence of network input layer names for inference.
The function is used to associate cv::gapi::infer<> inputs with the model inputs.
The number of names has to match the number of network inputs as defined in G_API_NET().
In case a network has only a single input layer, there is no need to specify the name manually.
@param layer_names std::array<std::string, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains names of input layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputLayers(const std::vector<std::string> &layer_names) {
m_desc.input_names = layer_names;
return *this;
}
/** @brief Specifies the sequence of network output layer names for inference.
The function is used to associate cv::gapi::infer<> outputs with the model outputs.
Number of names has to match the number of network outputs as defined in G_API_NET().
If a network has only a single output layer, there is no need to specify the name manually.
@param layer_names std::vector<std::string> with the names of output layers; the number
of names must match the number of outputs as defined in the @ref G_API_NET.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputLayers(const std::vector<std::string> &layer_names) {
m_desc.output_names = layer_names;
return *this;
}
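// Illustrative sketch: associating graph inputs/outputs with model layers
// (the layer names "data" and "prob" are assumptions):
//
//   auto net = cv::gapi::ov::Params<FaceNet>("face.xml", "face.bin", "CPU")
//                  .cfgInputLayers({"data"})
//                  .cfgOutputLayers({"prob"});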
/** @brief Specifies OpenVINO plugin configuration.
The function is used to set configuration for the OpenVINO plugin. Some parameters
can differ between plugins; see https://docs.openvinotoolkit.org/latest/index.html
for details on a specific plugin.
@param config Map of pairs: (config parameter name, config parameter value).
@return reference to this parameter structure.
*/
Params<Net>& cfgPluginConfig(const detail::ParamDesc::PluginConfigT &config) {
m_desc.config = config;
return *this;
}
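// Illustrative sketch, assuming `net` is a Params object as above. The
// available keys/values are plugin-specific; the performance hint below is
// just one commonly documented OpenVINO property:
//
//   net.cfgPluginConfig({{"PERFORMANCE_HINT", "THROUGHPUT"}});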
/** @brief Ensures the model has named tensors.
This function is used to ensure that all tensors in the model have names.
It goes through all input and output nodes of the model and sets the names
if they are not set. This is necessary for models with nameless tensors.
If a tensor does not have a name, it will be assigned a default name
based on the producer node's friendly name. If the producer node has multiple
outputs, the name will be in the form "node_name:N", where N is the output index.
@param flag If true, all tensors are guaranteed to have names.
@return reference to this parameter structure.
*/
Params<Net>& cfgEnsureNamedTensors(bool flag = true) {
m_desc.ensure_named_tensors = flag;
return *this;
}
/** @brief Specifies tensor layout for an input layer.
The function is used to set tensor layout for an input layer.
@param layout Tensor layout ("NCHW", "NHWC", etc)
will be applied to all input layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputTensorLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
.input_tensor_layout = std::move(layout);
return *this;
}
/** @overload
@param layout_map Map of pairs: name of corresponding input layer
and its tensor layout represented in std::string ("NCHW", "NHWC", etc)
@return reference to this parameter structure.
*/
Params<Net>&
cfgInputTensorLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
.input_tensor_layout = std::move(layout_map);
return *this;
}
/** @brief Specifies model layout for an input layer.
The function is used to set model layout for an input layer.
@param layout Model layout ("NCHW", "NHWC", etc)
will be applied to all input layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputModelLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
.input_model_layout = std::move(layout);
return *this;
}
/** @overload
@param layout_map Map of pairs: name of corresponding input layer
and its model layout ("NCHW", "NHWC", etc)
@return reference to this parameter structure.
*/
Params<Net>&
cfgInputModelLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
.input_model_layout = std::move(layout_map);
return *this;
}
/** @brief Specifies tensor layout for an output layer.
The function is used to set tensor layout for an output layer.
@param layout Tensor layout ("NCHW", "NHWC", etc)
will be applied to all output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputTensorLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
.output_tensor_layout = std::move(layout);
return *this;
}
/** @overload
@param layout_map Map of pairs: name of corresponding output layer
and its tensor layout represented in std::string ("NCHW", "NHWC", etc)
@return reference to this parameter structure.
*/
Params<Net>&
cfgOutputTensorLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
.output_tensor_layout = std::move(layout_map);
return *this;
}
/** @brief Specifies model layout for an output layer.
The function is used to set model layout for an output layer.
@param layout Model layout ("NCHW", "NHWC", etc)
will be applied to all output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputModelLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
.output_model_layout = std::move(layout);
return *this;
}
/** @overload
@param layout_map Map of pairs: name of corresponding output layer
and its model layout ("NCHW", "NHWC", etc)
@return reference to this parameter structure.
*/
Params<Net>&
cfgOutputModelLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
.output_model_layout = std::move(layout_map);
return *this;
}
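// Illustrative sketch: the model layout describes how the model itself was
// defined, while the tensor layout describes the data the user passes in,
// so the backend can insert the required conversion:
//
//   net.cfgInputModelLayout("NCHW")     // layout baked into the model
//      .cfgInputTensorLayout("NHWC");   // layout of the user-provided data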
/** @brief Specifies tensor precision for an output layer.
The function is used to set tensor precision for an output layer.
@param precision Precision in OpenCV format (CV_8U, CV_32F, ...)
will be applied to all output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputTensorPrecision(int precision) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
.output_tensor_precision = precision;
return *this;
}
/** @overload
@param precision_map Map of pairs: name of corresponding output layer
and its precision in OpenCV format (CV_8U, CV_32F, ...)
@return reference to this parameter structure.
*/
Params<Net>&
cfgOutputTensorPrecision(detail::AttrMap<int> precision_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
.output_tensor_precision = std::move(precision_map);
return *this;
}
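// Illustrative sketch: requesting output precision globally or per layer
// (the layer name "prob" is an assumption):
//
//   net.cfgOutputTensorPrecision(CV_32F);
//   net.cfgOutputTensorPrecision({{"prob", CV_8U}});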
/** @brief Enables or disables clamping of model outputs in the PrePostProcessor.
By default, output values are clamped to the valid range for the output precision
by the device or plugin. Enabling this option moves clamping to the PrePostProcessor stage.
@note This feature is only available with OpenVINO 2025.2 and newer.
@param flag If true, clamping is performed in the PrePostProcessor;
otherwise, it is handled by the device or plugin.
@return reference to this parameter structure.
*/
Params<Net>&
cfgClampOutputs(bool flag = true) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "clamp outputs")
.clamp_outputs = flag;
return *this;
}
/** @brief Specifies the new shape for input layers.
The function is used to set new shape for input layers.
@param new_shape New shape will be applied to all input layers.
@return reference to this parameter structure.
*/
Params<Net>&
cfgReshape(std::vector<size_t> new_shape) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
.new_shapes = std::move(new_shape);
return *this;
}
/** @overload
@param new_shape_map Map of pairs: name of corresponding input layer
and its new shape.
@return reference to this parameter structure.
*/
Params<Net>&
cfgReshape(detail::AttrMap<std::vector<size_t>> new_shape_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
.new_shapes = std::move(new_shape_map);
return *this;
}
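// Illustrative sketch: reshaping all inputs at once or per layer (the layer
// name "data" and the shape values are assumptions):
//
//   net.cfgReshape({1u, 3u, 320u, 320u});
//   net.cfgReshape({{"data", std::vector<size_t>{1u, 3u, 320u, 320u}}});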
/** @brief Specifies number of asynchronous inference requests.
@param nireq Number of inference asynchronous requests.
@return reference to this parameter structure.
*/
Params<Net>& cfgNumRequests(const size_t nireq) {
if (nireq == 0) {
cv::util::throw_error(
std::logic_error("Number of inference requests"
" must be greater than zero."));
}
m_desc.nireq = nireq;
return *this;
}
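// Illustrative sketch: allowing up to four infer requests to run
// asynchronously, which typically improves throughput in streaming mode:
//
//   net.cfgNumRequests(4u);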
/** @brief Specifies mean values for preprocessing.
*
The function is used to set mean values for input layer preprocessing.
@param mean_values Float vector containing mean values.
@return reference to this parameter structure.
*/
Params<Net>& cfgMean(std::vector<float> mean_values) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
.mean_values = std::move(mean_values);
return *this;
}
/** @overload
@param mean_map Map of pairs: name of corresponding input layer
and its mean values.
@return reference to this parameter structure.
*/
Params<Net>& cfgMean(detail::AttrMap<std::vector<float>> mean_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
.mean_values = std::move(mean_map);
return *this;
}
/** @brief Specifies scale values for preprocessing.
*
The function is used to set scale values for input layer preprocessing.
@param scale_values Float vector containing scale values.
@return reference to this parameter structure.
*/
Params<Net>& cfgScale(std::vector<float> scale_values) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
.scale_values = std::move(scale_values);
return *this;
}
/** @overload
@param scale_map Map of pairs: name of corresponding input layer
and its scale values.
@return reference to this parameter structure.
*/
Params<Net>& cfgScale(detail::AttrMap<std::vector<float>> scale_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
.scale_values = std::move(scale_map);
return *this;
}
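// Illustrative sketch: configuring per-channel normalization; the values
// below are the common ImageNet statistics, used here only as an example:
//
//   net.cfgMean({0.485f, 0.456f, 0.406f})
//      .cfgScale({0.229f, 0.224f, 0.225f});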
/** @brief Specifies resize interpolation algorithm.
*
The function is used to configure resize preprocessing for input layer.
@param interpolation Resize interpolation algorithm.
Supported algorithms: #INTER_NEAREST, #INTER_LINEAR, #INTER_CUBIC.
@return reference to this parameter structure.
*/
Params<Net>& cfgResize(int interpolation) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
.interpolation = interpolation;
return *this;
}
/** @overload
@param interpolation Map of pairs: name of corresponding input layer
and its resize algorithm.
@return reference to this parameter structure.
*/
Params<Net>& cfgResize(detail::AttrMap<int> interpolation) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
.interpolation = std::move(interpolation);
return *this;
}
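// Illustrative sketch: selecting the interpolation used for the resize
// preprocessing step:
//
//   net.cfgResize(cv::INTER_LINEAR);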
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ov::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { m_desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc m_desc;
};
/**
* @brief This structure provides functions that fill inference parameters
* for the generic network type.
* @see struct Generic
*/
template<>
class Params<cv::gapi::Generic> {
public:
/** @brief Class constructor.
Constructs Params based on model information and specifies default values for other
inference description parameters. Model is loaded and compiled using "OpenVINO Toolkit".
@param tag string tag of the network for which these parameters are intended.
@param model_path Path to a model.
@param bin_path Path to a data file.
For the IR format (*.bin):
if the path is empty, will try to read a .bin file with the same name as the .xml;
if such a .bin file is not found, will load the IR without weights.
For the PDPD (*.pdmodel) and ONNX (*.onnx) formats bin_path isn't used.
@param device target device to use.
*/
Params(const std::string &tag,
const std::string &model_path,
const std::string &bin_path,
const std::string &device)
: m_tag(tag),
m_desc( detail::ParamDesc::Kind{detail::ParamDesc::Model{model_path, bin_path}}
, device
, true /* is generic */
, 0u
, 0u) {
}
/** @overload
Use this constructor for pre-compiled networks. The model is imported from a
pre-compiled blob.
@param tag string tag of the network for which these parameters are intended.
@param blob_path path to the compiled model (*.blob).
@param device target device to use.
*/
Params(const std::string &tag,
const std::string &blob_path,
const std::string &device)
: m_tag(tag),
m_desc( detail::ParamDesc::Kind{detail::ParamDesc::CompiledModel{blob_path}}
, device
, true /* is generic */
, 0u
, 0u) {
}
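// Illustrative sketch: generic Params are matched by tag at inference time,
// so no G_API_NET declaration is needed (tag, paths and the layer name are
// assumptions):
//
//   auto net = cv::gapi::ov::Params<cv::gapi::Generic>(
//       "face-net", "face.xml", "face.bin", "CPU");
//
//   cv::GMat in;
//   cv::GInferInputs inputs;
//   inputs["data"] = in;
//   cv::GInferOutputs outputs = cv::gapi::infer<cv::gapi::Generic>("face-net", inputs);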
/** @see ov::Params::cfgPluginConfig. */
Params& cfgPluginConfig(const detail::ParamDesc::PluginConfigT &config) {
m_desc.config = config;
return *this;
}
/** @see ov::Params::cfgEnsureNamedTensors. */
Params& cfgEnsureNamedTensors(bool flag = true) {
m_desc.ensure_named_tensors = flag;
return *this;
}
/** @see ov::Params::cfgInputTensorLayout. */
Params& cfgInputTensorLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
.input_tensor_layout = std::move(layout);
return *this;
}
/** @overload */
Params&
cfgInputTensorLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
.input_tensor_layout = std::move(layout_map);
return *this;
}
/** @see ov::Params::cfgInputModelLayout. */
Params& cfgInputModelLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
.input_model_layout = std::move(layout);
return *this;
}
/** @overload */
Params&
cfgInputModelLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
.input_model_layout = std::move(layout_map);
return *this;
}
/** @see ov::Params::cfgOutputTensorLayout. */
Params& cfgOutputTensorLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
.output_tensor_layout = std::move(layout);
return *this;
}
/** @overload */
Params&
cfgOutputTensorLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
.output_tensor_layout = std::move(layout_map);
return *this;
}
/** @see ov::Params::cfgOutputModelLayout. */
Params& cfgOutputModelLayout(std::string layout) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
.output_model_layout = std::move(layout);
return *this;
}
/** @overload */
Params&
cfgOutputModelLayout(detail::AttrMap<std::string> layout_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
.output_model_layout = std::move(layout_map);
return *this;
}
/** @see ov::Params::cfgOutputTensorPrecision. */
Params& cfgOutputTensorPrecision(int precision) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
.output_tensor_precision = precision;
return *this;
}
/** @overload */
Params&
cfgOutputTensorPrecision(detail::AttrMap<int> precision_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
.output_tensor_precision = std::move(precision_map);
return *this;
}
/** @see ov::Params::cfgClampOutputs. */
Params&
cfgClampOutputs(bool flag = true) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "clamp outputs")
.clamp_outputs = flag;
return *this;
}
/** @see ov::Params::cfgReshape. */
Params& cfgReshape(std::vector<size_t> new_shape) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
.new_shapes = std::move(new_shape);
return *this;
}
/** @overload */
Params&
cfgReshape(detail::AttrMap<std::vector<size_t>> new_shape_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
.new_shapes = std::move(new_shape_map);
return *this;
}
/** @see ov::Params::cfgNumRequests. */
Params& cfgNumRequests(const size_t nireq) {
if (nireq == 0) {
cv::util::throw_error(
std::logic_error("Number of inference requests"
" must be greater than zero."));
}
m_desc.nireq = nireq;
return *this;
}
/** @see ov::Params::cfgMean. */
Params& cfgMean(std::vector<float> mean_values) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
.mean_values = std::move(mean_values);
return *this;
}
/** @overload */
Params& cfgMean(detail::AttrMap<std::vector<float>> mean_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
.mean_values = std::move(mean_map);
return *this;
}
/** @see ov::Params::cfgScale. */
Params& cfgScale(std::vector<float> scale_values) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
.scale_values = std::move(scale_values);
return *this;
}
/** @overload */
Params& cfgScale(detail::AttrMap<std::vector<float>> scale_map) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
.scale_values = std::move(scale_map);
return *this;
}
/** @see ov::Params::cfgResize. */
Params& cfgResize(int interpolation) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
.interpolation = interpolation;
return *this;
}
/** @overload */
Params& cfgResize(detail::AttrMap<int> interpolation) {
detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
.interpolation = std::move(interpolation);
return *this;
}
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ov::backend(); }
std::string tag() const { return m_tag; }
cv::util::any params() const { return { m_desc }; }
// END(G-API's network parametrization API)
protected:
std::string m_tag;
detail::ParamDesc m_desc;
};
} // namespace ov
namespace wip { namespace ov {
/**
* @brief Ask G-API OpenVINO backend to run only inference of model provided.
*
* G-API OpenVINO backend will perform only the inference of the model provided
* without populating inputs or copying back output data.
* This mode is used to evaluate the pure inference performance of the model,
* excluding the I/O data transfer.
*/
struct benchmark_mode { };
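// Illustrative sketch: benchmark_mode is passed as a compile argument
// (assuming `graph` is a cv::GComputation and `net` is a configured
// Params object):
//
//   auto cc = graph.compileStreaming(
//       cv::compile_args(cv::gapi::networks(net),
//                        cv::gapi::wip::ov::benchmark_mode{}));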
class WorkloadTypeOV : public WorkloadType {};
using WorkloadTypeOVPtr = std::shared_ptr<cv::gapi::wip::ov::WorkloadTypeOV>;
} // namespace ov
} // namespace wip
} // namespace gapi
namespace detail
{
template<> struct CompileArgTag<cv::gapi::wip::ov::benchmark_mode>
{
static const char* tag() { return "gapi.wip.ov.benchmark_mode"; }
};
template<> struct CompileArgTag<cv::gapi::wip::ov::WorkloadTypeOVPtr>
{
static const char* tag() { return "gapi.wip.ov.workload_type"; }
};
}
} // namespace cv
#endif // OPENCV_GAPI_INFER_OV_HPP

View File

@@ -1,138 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
#ifndef OPENCV_GAPI_PARSERS_HPP
#define OPENCV_GAPI_PARSERS_HPP
#include <utility> // std::tuple
#include <opencv2/gapi/gmat.hpp>
#include <opencv2/gapi/gkernel.hpp>
namespace cv { namespace gapi {
namespace nn {
namespace parsers {
using GRects = GArray<Rect>;
using GDetections = std::tuple<GArray<Rect>, GArray<int>>;
G_TYPED_KERNEL(GParseSSDBL, <GDetections(GMat, GOpaque<Size>, float, int)>,
"org.opencv.nn.parsers.parseSSD_BL") {
static std::tuple<GArrayDesc,GArrayDesc> outMeta(const GMatDesc&, const GOpaqueDesc&, float, int) {
return std::make_tuple(empty_array_desc(), empty_array_desc());
}
};
G_TYPED_KERNEL(GParseSSD, <GRects(GMat, GOpaque<Size>, float, bool, bool)>,
"org.opencv.nn.parsers.parseSSD") {
static GArrayDesc outMeta(const GMatDesc&, const GOpaqueDesc&, float, bool, bool) {
return empty_array_desc();
}
};
G_TYPED_KERNEL(GParseYolo, <GDetections(GMat, GOpaque<Size>, float, float, std::vector<float>)>,
"org.opencv.nn.parsers.parseYolo") {
static std::tuple<GArrayDesc, GArrayDesc> outMeta(const GMatDesc&, const GOpaqueDesc&,
float, float, const std::vector<float>&) {
return std::make_tuple(empty_array_desc(), empty_array_desc());
}
static const std::vector<float>& defaultAnchors() {
static std::vector<float> anchors {
0.57273f, 0.677385f, 1.87446f, 2.06253f, 3.33843f, 5.47434f, 7.88282f, 3.52778f, 9.77052f, 9.16828f
};
return anchors;
}
};
} // namespace parsers
} // namespace nn
/** @brief Parses output of SSD network.
Extracts detection information (box, confidence, label) from SSD output and
filters it by given confidence and label.
@note Function textual ID is "org.opencv.nn.parsers.parseSSD_BL"
@param in Input CV_32F tensor with {1,1,N,7} dimensions.
@param inSz Size to project detected boxes to (size of the input image).
@param confidenceThreshold Detections with confidence below this
threshold are rejected.
@param filterLabel If provided (!= -1), only detections with the
given label are kept in the output.
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
*/
GAPI_EXPORTS_W std::tuple<GArray<Rect>, GArray<int>> parseSSD(const GMat& in,
const GOpaque<Size>& inSz,
const float confidenceThreshold = 0.5f,
const int filterLabel = -1);
/** @brief Parses output of SSD network.
Extracts detection information (box, confidence) from SSD output and
filters it by given confidence and by going out of bounds.
@note Function textual ID is "org.opencv.nn.parsers.parseSSD"
@param in Input CV_32F tensor with {1,1,N,7} dimensions.
@param inSz Size to project detected boxes to (size of the input image).
@param confidenceThreshold Detections with confidence below this
threshold are rejected.
@param alignmentToSquare If true, bounding boxes are extended to squares:
the center of the rectangle remains unchanged and the side of the square
equals the larger side of the rectangle.
@param filterOutOfBounds If true, boxes that go out of the frame are filtered out.
@return a vector of detected bounding boxes.
*/
GAPI_EXPORTS_W GArray<Rect> parseSSD(const GMat& in,
const GOpaque<Size>& inSz,
const float confidenceThreshold,
const bool alignmentToSquare,
const bool filterOutOfBounds);
/** @brief Parses output of Yolo network.
Extracts detection information (box, confidence, label) from Yolo output,
filters it by given confidence and performs non-maximum suppression for overlapping boxes.
@note Function textual ID is "org.opencv.nn.parsers.parseYolo"
@param in Input CV_32F tensor with {1,13,13,N} dimensions, N should satisfy:
\f[\texttt{N} = (\texttt{num_classes} + \texttt{5}) * \texttt{5},\f]
where num_classes is the number of classes the Yolo network was trained with.
@param inSz Size to project detected boxes to (size of the input image).
@param confidenceThreshold Detections with confidence below this
threshold are rejected.
@param nmsThreshold Non-maximum suppression threshold which controls the minimum
relative box intersection area required for rejecting a box with a smaller confidence.
If 1.f, NMS is not performed and no boxes are rejected.
@param anchors Anchors Yolo network was trained with.
@note The default anchor values are specified for YOLO v2 Tiny as described in Intel Open Model Zoo
<a href="https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/yolo-v2-tiny-tf/yolo-v2-tiny-tf.md">documentation</a>.
@return a tuple with a vector of detected boxes and a vector of appropriate labels.
*/
GAPI_EXPORTS_W std::tuple<GArray<Rect>, GArray<int>> parseYolo(const GMat& in,
const GOpaque<Size>& inSz,
const float confidenceThreshold = 0.5f,
const float nmsThreshold = 0.5f,
const std::vector<float>& anchors
= nn::parsers::GParseYolo::defaultAnchors());
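// Illustrative sketch: both parsers consume a raw detection tensor together
// with the original frame size (the network type SSDNet is a hypothetical
// G_API_NET-declared network; cv::gapi::streaming::size is assumed here):
//
//   cv::GMat in;
//   cv::GMat blob = cv::gapi::infer<SSDNet>(in);
//   cv::GOpaque<cv::Size> sz = cv::gapi::streaming::size(in);
//   cv::GArray<cv::Rect> boxes = cv::gapi::parseSSD(blob, sz, 0.5f, true, true);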
} // namespace gapi
} // namespace cv
// Reimport parseSSD & parseYolo under their initial namespace
namespace cv {
namespace gapi {
namespace streaming {
using cv::gapi::parseSSD;
using cv::gapi::parseYolo;
} // namespace streaming
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_PARSERS_HPP

View File

@@ -1,59 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2025 Intel Corporation
#ifndef OPENCV_WORKLOADTYPE_HPP
#define OPENCV_WORKLOADTYPE_HPP
#include <string>
#include <functional>
#include <vector>
#include <algorithm>
#include <cstdint> // uint64_t
using Callback = std::function<void(const std::string &type)>;
class WorkloadListener {
Callback callback;
public:
uint64_t id;
WorkloadListener(const Callback &cb, uint64_t listener_id) : callback(cb), id(listener_id) {}
void operator()(const std::string &type) const {
if (callback) {
callback(type);
}
}
bool operator==(const WorkloadListener& other) const {
return id == other.id;
}
};
class WorkloadType {
std::vector<WorkloadListener> listeners;
uint64_t nextId = 1;
public:
uint64_t addListener(const Callback &cb) {
uint64_t id = nextId++;
listeners.emplace_back(cb, id);
return id;
}
void removeListener(uint64_t id) {
auto it = std::remove_if(listeners.begin(), listeners.end(),
[id](const WorkloadListener& entry) { return entry.id == id; });
if (it != listeners.end()) {
listeners.erase(it, listeners.end());
}
}
void set(const std::string &type) {
for (const auto &listener : listeners) {
listener(type);
}
}
};
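// Illustrative usage sketch (the workload type string is an assumption;
// the actual values are defined by the consuming backend):
//
//   WorkloadType wt;
//   uint64_t id = wt.addListener([](const std::string &type) {
//       std::cout << "workload type changed to " << type << "\n"; // needs <iostream>
//   });
//   wt.set("EFFICIENT");   // invokes every registered listener
//   wt.removeListener(id); // detach using the id returned by addListener()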
#endif // OPENCV_WORKLOADTYPE_HPP