github.com/kaydxh/golang@v0.0.131/pkg/gocv/cgo/third_path/opencv4/include/opencv2/gapi/infer/ie.hpp

// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019-2021 Intel Corporation

#ifndef OPENCV_GAPI_INFER_IE_HPP
#define OPENCV_GAPI_INFER_IE_HPP

#include <unordered_map>
#include <unordered_set>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>

#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>

#include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp>   // Generic

namespace cv {
namespace gapi {
// FIXME: introduce a new sub-namespace for NN?

/**
 * @brief This namespace contains G-API OpenVINO backend functions,
 * structures, and symbols.
 */
namespace ie {

GAPI_EXPORTS cv::gapi::GBackend backend();

/**
 * Specifies how G-API and IE should treat input data.
 *
 * In OpenCV, the same cv::Mat is used to represent both
 * image and tensor data. Sometimes those are hardly distinguishable,
 * so this extra parameter is used to give G-API a hint.
 *
 * This hint controls how G-API reinterprets the data when converting
 * it to IE Blob format (and which layout/etc. is assigned to this data).
 */
enum class TraitAs: int
{
    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor and passes dimensions as-is
    IMAGE   //!< G-API treats an associated cv::Mat as an image and creates an "image" blob (NCHW/NHWC, etc.)
};

using IEConfig = std::map<std::string, std::string>;

namespace detail {
struct ParamDesc {
    std::string model_path;
    std::string weights_path;
    std::string device_id;

    std::vector<std::string> input_names;
    std::vector<std::string> output_names;

    using ConstInput = std::pair<cv::Mat, TraitAs>;
    std::unordered_map<std::string, ConstInput> const_inputs;

    std::size_t num_in;
    std::size_t num_out;

    enum class Kind {Load, Import};
    Kind kind;
    bool is_generic;
    IEConfig config;

    std::map<std::string, std::vector<std::size_t>> reshape_table;
    std::unordered_set<std::string> layer_names_to_reshape;

    // NB: Number of asynchronous infer requests
    size_t nireq;

    // NB: An optional config to setup RemoteContext for IE
    cv::util::any context_config;
};
} // namespace detail

// FIXME: this is probably a shared (reusable) thing
template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
};
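// A minimal usage sketch, not part of the original header: for a network declared
// with G_API_NET elsewhere, PortCfg<Net>::In and PortCfg<Net>::Out are fixed-size
// std::array<std::string, N> types whose N matches the arity of the network
// signature. The network name and tag below are hypothetical.
//
//     G_API_NET(AgeGender,
//               <std::tuple<cv::GMat, cv::GMat>(cv::GMat)>,
//               "example.age-gender");
//     // PortCfg<AgeGender>::In  is std::array<std::string, 1>  (one input GMat)
//     // PortCfg<AgeGender>::Out is std::array<std::string, 2>  (two output GMats)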
/**
 * @brief This structure provides functions
 * that fill inference parameters for the "OpenVINO Toolkit" model.
 */
template<typename Net> class Params {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and specifies default values for other
    inference description parameters. The model is loaded and compiled using the "OpenVINO Toolkit".

    @param model Path to topology IR (.xml file).
    @param weights Path to weights (.bin file).
    @param device Target device to use.
    */
    Params(const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value  // num_in
              , std::tuple_size<typename Net::OutArgs>::value // num_out
              , detail::ParamDesc::Kind::Load
              , false
              , {}
              , {}
              , {}
              , 1u
              , {}} {
    };

    /** @overload
    Use this constructor to work with a pre-compiled network.
    The model is imported from a pre-compiled blob.

    @param model Path to model.
    @param device Target device to use.
    */
    Params(const std::string &model,
           const std::string &device)
        : desc{ model, {}, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value  // num_in
              , std::tuple_size<typename Net::OutArgs>::value // num_out
              , detail::ParamDesc::Kind::Import
              , false
              , {}
              , {}
              , {}
              , 1u
              , {}} {
    };

    /** @brief Specifies the sequence of network input layer names for inference.

    The function is used to associate cv::gapi::infer<> inputs with the model inputs.
    The number of names has to match the number of network inputs as defined in G_API_NET().
    If a network has only a single input layer, there is no need to specify the name manually.

    @param layer_names std::array<std::string, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains names of input layers.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
        desc.input_names.clear();
        desc.input_names.reserve(layer_names.size());
        std::copy(layer_names.begin(), layer_names.end(),
                  std::back_inserter(desc.input_names));
        return *this;
    }

    /** @brief Specifies the sequence of network output layer names for inference.

    The function is used to associate cv::gapi::infer<> outputs with the model outputs.
    The number of names has to match the number of network outputs as defined in G_API_NET().
    If a network has only a single output layer, there is no need to specify the name manually.

    @param layer_names std::array<std::string, N> where N is the number of outputs
    as defined in the @ref G_API_NET. Contains names of output layers.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
        desc.output_names.clear();
        desc.output_names.reserve(layer_names.size());
        std::copy(layer_names.begin(), layer_names.end(),
                  std::back_inserter(desc.output_names));
        return *this;
    }

    /** @brief Specifies a constant input.

    The function is used to set a constant input. This input has to be
    a preprocessed tensor if its type is TENSOR. You need to provide the name of the
    network layer which will receive the provided data.

    @param layer_name Name of network layer.
    @param data cv::Mat that contains data which will be associated with network layer.
    @param hint Input type @sa cv::gapi::ie::TraitAs.
    @return reference to this parameter structure.
    */
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data,
                            TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }
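    // A hedged usage sketch, not part of the original header: constructing Params for the
    // hypothetical AgeGender network above and chaining the setters defined so far. The
    // file paths, layer names and device string are placeholders.
    //
    //     auto params = cv::gapi::ie::Params<AgeGender>{
    //             "age-gender.xml",  // topology IR
    //             "age-gender.bin",  // weights
    //             "CPU"              // target device
    //         }
    //         .cfgInputLayers ({ "data" })
    //         .cfgOutputLayers({ "age_conv3", "prob" })
    //         .constInput("scale", cv::Mat::ones(1, 1, CV_32F), cv::gapi::ie::TraitAs::TENSOR);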
    /** @brief Specifies OpenVINO plugin configuration.

    The function is used to set configuration for the OpenVINO plugin. Some parameters
    can be different for each plugin. Please follow https://docs.openvinotoolkit.org/latest/index.html
    for information about a specific plugin.

    @param cfg Map of pairs: (config parameter name, config parameter value).
    @return reference to this parameter structure.
    */
    Params& pluginConfig(const IEConfig& cfg) {
        desc.config = cfg;
        return *this;
    }

    /** @overload
    Function with an rvalue parameter.

    @param cfg rvalue map of pairs: (config parameter name, config parameter value).
    @return reference to this parameter structure.
    */
    Params& pluginConfig(IEConfig&& cfg) {
        desc.config = std::move(cfg);
        return *this;
    }

    /** @brief Specifies configuration for RemoteContext in InferenceEngine.

    When RemoteContext is configured, the backend imports the networks using the context.
    It also expects cv::MediaFrames to actually be remote, in order to operate with blobs via the context.

    @param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
    @return reference to this parameter structure.
    */
    Params& cfgContextParams(const cv::util::any& ctx_cfg) {
        desc.context_config = ctx_cfg;
        return *this;
    }

    /** @overload
    Function with an rvalue parameter.

    @param ctx_cfg cv::util::any value which holds InferenceEngine::ParamMap.
    @return reference to this parameter structure.
    */
    Params& cfgContextParams(cv::util::any&& ctx_cfg) {
        desc.context_config = std::move(ctx_cfg);
        return *this;
    }

    /** @brief Specifies the number of asynchronous inference requests.

    @param nireq Number of asynchronous inference requests.
    @return reference to this parameter structure.
    */
    Params& cfgNumRequests(size_t nireq) {
        GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
        desc.nireq = nireq;
        return *this;
    }

    /** @brief Specifies new input shapes for the network inputs.

    The function is used to specify new input shapes for the network inputs.
    Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1networkNetwork.html
    for additional information.

    @param reshape_table Map of pairs: name of the corresponding data and its new dimensions.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
        desc.reshape_table = reshape_table;
        return *this;
    }

    /** @overload */
    Params<Net>& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
        desc.reshape_table = std::move(reshape_table);
        return *this;
    }

    /** @overload

    @param layer_name Name of layer.
    @param layer_dims New dimensions for this layer.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params<Net>& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload

    @param layer_names Set of names of network layers that will be used for network reshape.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
        desc.layer_names_to_reshape = layer_names;
        return *this;
    }

    /** @overload

    @param layer_names rvalue set of the selected layers; these will be reshaped automatically
    to the input image size.
    @return reference to this parameter structure.
    */
    Params<Net>& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
        desc.layer_names_to_reshape = std::move(layer_names);
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend(); }
    std::string   tag()     const { return Net::tag(); }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
};
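// A hedged end-to-end sketch, not part of the original header: the parameter structure is
// typically passed to graph compilation through cv::gapi::networks(). The config key,
// shapes, paths and device below are illustrative assumptions, not values mandated here.
//
//     auto net = cv::gapi::ie::Params<AgeGender>{ "age-gender.xml", "age-gender.bin", "CPU" }
//                    .pluginConfig({{ "CPU_THREADS_NUM", "4" }})      // plugin-specific key
//                    .cfgNumRequests(4)                               // 4 async infer requests
//                    .cfgInputReshape("data", { 1u, 3u, 320u, 320u }); // new input shape
//
//     cv::GComputation graph(...);  // a graph that calls cv::gapi::infer<AgeGender>
//     // frame, age, gender are user-provided cv::Mat objects
//     graph.apply(cv::gin(frame), cv::gout(age, gender),
//                 cv::compile_args(cv::gapi::networks(net)));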
/**
 * @brief This structure provides functions for the generic network type that
 * fill inference parameters.
 * @see struct Generic
 */
template<>
class Params<cv::gapi::Generic> {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and sets default values for other
    inference description parameters. The model is loaded and compiled using the OpenVINO Toolkit.

    @param tag string tag of the network for which these parameters are intended.
    @param model path to topology IR (.xml file).
    @param weights path to weights (.bin file).
    @param device target device to use.
    */
    Params(const std::string &tag,
           const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}, 0u, 0u,
                detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
                {}},
          m_tag(tag) {
    };

    /** @overload

    Use this constructor for pre-compiled networks. The model is imported from a
    pre-compiled blob.

    @param tag string tag of the network for which these parameters are intended.
    @param model path to model.
    @param device target device to use.
    */
    Params(const std::string &tag,
           const std::string &model,
           const std::string &device)
        : desc{ model, {}, device, {}, {}, {}, 0u, 0u,
                detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
                {}},
          m_tag(tag) {
    };

    /** @see ie::Params::pluginConfig. */
    Params& pluginConfig(const IEConfig& cfg) {
        desc.config = cfg;
        return *this;
    }

    /** @overload */
    Params& pluginConfig(IEConfig&& cfg) {
        desc.config = std::move(cfg);
        return *this;
    }

    /** @see ie::Params::constInput. */
    Params& constInput(const std::string &layer_name,
                       const cv::Mat &data,
                       TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    /** @see ie::Params::cfgNumRequests. */
    Params& cfgNumRequests(size_t nireq) {
        GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
        desc.nireq = nireq;
        return *this;
    }

    /** @see ie::Params::cfgInputReshape. */
    Params& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>> &reshape_table) {
        desc.reshape_table = reshape_table;
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::map<std::string, std::vector<std::size_t>> &&reshape_table) {
        desc.reshape_table = std::move(reshape_table);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::string &&layer_name, std::vector<size_t> &&layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(const std::string &layer_name, const std::vector<size_t> &layer_dims) {
        desc.reshape_table.emplace(layer_name, layer_dims);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(std::unordered_set<std::string> &&layer_names) {
        desc.layer_names_to_reshape = std::move(layer_names);
        return *this;
    }

    /** @overload */
    Params& cfgInputReshape(const std::unordered_set<std::string> &layer_names) {
        desc.layer_names_to_reshape = layer_names;
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend(); }
    std::string   tag()     const { return m_tag; }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
    std::string m_tag;
};
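// A hedged sketch for the generic (tag-based) variant, not part of the original header:
// no G_API_NET declaration is needed; the string tag ties these parameters to a
// cv::gapi::infer<cv::gapi::Generic> call that uses the same tag. Paths, tag, layer
// names and device are placeholders.
//
//     auto generic_net = cv::gapi::ie::Params<cv::gapi::Generic>{
//             "my-model", "model.xml", "model.bin", "CPU" };
//
//     cv::GMat in;
//     cv::GInferInputs inputs;
//     inputs["data"] = in;
//     auto outputs = cv::gapi::infer<cv::gapi::Generic>("my-model", inputs);
//     cv::GMat prob = outputs.at("prob");
//     // Build a cv::GComputation from (in) to (prob) and compile it with
//     // cv::compile_args(cv::gapi::networks(generic_net)).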
} // namespace ie
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_IE_HPP