namespace InferenceEngine

Overview

Inference Engine Plugin API namespace.

namespace InferenceEngine {

// namespaces

namespace InferenceEngine::CLDNNConfigParams;
namespace InferenceEngine::CPUConfigParams;
namespace InferenceEngine::G;
namespace InferenceEngine::GNAConfigParams;
namespace InferenceEngine::GPUConfigParams;
namespace InferenceEngine::GPUContextParams;
namespace InferenceEngine::HeteroConfigParams;
namespace InferenceEngine::Metrics;
namespace InferenceEngine::MultiDeviceConfigParams;
namespace InferenceEngine::NetPass;
namespace InferenceEngine::PluginConfigInternalParams;
namespace InferenceEngine::PluginConfigParams;
namespace InferenceEngine::PrecisionUtils;
    namespace InferenceEngine::PrecisionUtils::details;
namespace InferenceEngine::gapi;
    namespace InferenceEngine::gapi::kernels;
        namespace InferenceEngine::gapi::kernels::areaDownscale32f;
        namespace InferenceEngine::gapi::kernels::areaDownscale8u;
        namespace InferenceEngine::gapi::kernels::areaUpscale;
        namespace InferenceEngine::gapi::kernels::areaUpscale32f;
        namespace InferenceEngine::gapi::kernels::avx;
        namespace InferenceEngine::gapi::kernels::avx512;
        namespace InferenceEngine::gapi::kernels::linear;
        namespace InferenceEngine::gapi::kernels::linear32f;
        namespace InferenceEngine::gapi::kernels::neon;
namespace InferenceEngine::gpu;
namespace InferenceEngine::itt;
    namespace InferenceEngine::itt::domains;

// typedefs

typedef std::set<CNNLayerPtr, LayerNameLess> CNNLayerSet;
typedef std::shared_ptr<ICNNNetwork> CNNNetPtr;
typedef std::shared_ptr<const ICNNNetwork> CNNNetCPtr;
typedef CNNLayer GenericLayer;
typedef std::shared_ptr<CNNLayer> CNNLayerPtr;
typedef std::weak_ptr<CNNLayer> CNNLayerWeakPtr;
typedef std::vector<std::pair<std::string, std::string>> ordered_properties;
typedef std::function<void(const InferenceEngine::CNNLayerPtr, ordered_properties&, ordered_properties&)> printer_callback;
typedef std::shared_ptr<PreProcessDataPlugin> PreProcessDataPtr;
typedef ov::SoPtr<IExecutableNetworkInternal> SoExecutableNetworkInternal;
typedef ov::SoPtr<IInferRequestInternal> SoIInferRequestInternal;
typedef IVariableStateInternal IMemoryStateInternal;
typedef ov::SoPtr<IVariableStateInternal> SoIVariableStateInternal;
typedef IVariableStateInternal MemoryStateInternal;
typedef ov::ICore ICore;
typedef short ie_fp16;
typedef std::function<void()> Task;
typedef tbb::enumerable_thread_specific<T> ThreadLocal;
typedef tbb::concurrent_queue<T> ThreadSafeQueue;
typedef tbb::concurrent_bounded_queue<T> ThreadSafeBoundedQueue;
typedef VariableState MemoryState;
typedef void * gpu_handle_param;
typedef std::map<std::string, Blob::Ptr> BlobMap;
typedef std::vector<size_t> SizeVector;
typedef std::shared_ptr<Data> DataPtr;
typedef std::shared_ptr<const Data> CDataPtr;
typedef std::weak_ptr<Data> DataWeakPtr;
typedef std::map<std::string, CDataPtr> ConstOutputsDataMap;
typedef std::map<std::string, DataPtr> OutputsDataMap;
typedef std::shared_ptr<IExtension> IExtensionPtr;
typedef std::map<std::string, InputInfo::Ptr> InputsDataMap;
typedef std::map<std::string, InputInfo::CPtr> ConstInputsDataMap;
typedef ov::Any Parameter;
typedef ov::AnyMap ParamMap;

// enums

enum ColorFormat;
enum Layout;
enum LockOp;
enum MeanVariant;
enum ResizeAlgorithm;
enum StatusCode;
enum eDIMS_AXIS;

// structs

struct DataConfig;
struct DescriptionBuffer;
struct Exception;
struct InferenceEngineProfileInfo;
struct LayerConfig;
struct LayerParams;
struct PerfHintsConfig;
struct PreProcessChannel;
template <Precision::ePrecision p>
struct PrecisionTrait;
struct QueryNetworkResult;
struct ROI;
struct ResponseDesc;
struct Version;
template <typename F, typename S, typename... T>
struct is_one_of<F, S, T...>;
template <typename...>
struct is_one_of;

// unions

union UserValue;

// templates

template ExecutorManager;
template IAllocator;
template ICNNNetwork;
template IExecutableNetworkInternal;
template IInferRequestInternal;
template IInferencePlugin;
template ILayerExecImpl;
template ILayerImpl;
template IStreamsExecutor;
template ITaskExecutor;
template IVariableStateInternal;

// classes

class AsyncInferRequestThreadSafeDefault;
class BatchNormalizationLayer;
class BatchToSpaceLayer;
class BatchedBlob;
class BinaryConvolutionLayer;
class Blob;
class BlockingDesc;
class BroadcastLayer;
class BucketizeLayer;
class CNNLayer;
class CNNNetwork;
class CPUStreamsExecutor;
class ClampLayer;
class CompoundBlob;
class ConcatLayer;
class ConstTransformer;
class ConvolutionLayer;
class Core;
class CropLayer;
class Data;
class DeconvolutionLayer;
class DeformableConvolutionLayer;
class DepthToSpaceLayer;
class DeviceIDParser;
class EltwiseLayer;
class ExecutableNetwork;
class ExecutableNetworkThreadSafeDefault;
class ExperimentalDetectronGenerateProposalsSingleImageLayer;
class ExperimentalDetectronPriorGridGeneratorLayer;
class ExperimentalDetectronTopKROIs;
class ExperimentalSparseWeightedReduceLayer;
class Extension;
class FillLayer;
class FullyConnectedLayer;
class GRNLayer;
class GRUCell;
class GatherLayer;
class GemmLayer;
class I420Blob;
class IExecutableNetwork;
class IExtension;
class IInferRequest;
class IPreProcessData;
class IReader;
class ImmediateExecutor;
class InferRequest;
class InputInfo;
class LSTMCell;
class LayerNameLess;
template <>
class LockedMemory<void>;
template <class T>
class LockedMemory;
template <class T>
class LockedMemory<const T>;
class MVNLayer;
class MathLayer;
class MemoryBlob;
class NV12Blob;
class NonMaxSuppressionLayer;
class NormLayer;
class OneHotLayer;
class PReLULayer;
class PadLayer;
class Paddings;
class PoolingLayer;
class PowerLayer;
class PreProcessData;
class PreProcessDataPlugin;
class PreProcessInfo;
class Precision;
class PreprocEngine;
template <class T, int N = MAX_DIMS_NUMBER>
class PropertyVector;
class QuantizeLayer;
class RNNCell;
class RNNCellBase;
class RNNSequenceLayer;
class RangeLayer;
class ReLU6Layer;
class ReLULayer;
class ReduceLayer;
class RemoteBlob;
class RemoteContext;
class ReshapeLayer;
class ReverseSequenceLayer;
class ScaleShiftLayer;
class ScatterElementsUpdateLayer;
class ScatterUpdateLayer;
class SelectLayer;
class ShuffleChannelsLayer;
class SoftMaxLayer;
class SpaceToBatchLayer;
class SpaceToDepthLayer;
class SparseFillEmptyRowsLayer;
class SparseSegmentReduceLayer;
class SparseToDenseLayer;
class SplitLayer;
class StridedSliceLayer;
class TBBStreamsExecutor;
template <
    typename T,
    typename = std::enable_if<std::is_standard_layout<T>::value && std::is_trivial<T>::value>
    >
class TBlob;
class TensorDesc;
class TensorIterator;
template <typename T>
class ThreadSafeBoundedPriorityQueue;
template <typename T>
class ThreadSafeQueueWithSize;
class TileLayer;
class TopKLayer;
class UniqueLayer;
class VariableState;
class WeightableLayer;

// global variables

constexpr const int MAX_DIMS_NUMBER = 12;
static constexpr auto HDDL_GRAPH_TAG = "HDDL_GRAPH_TAG";
static constexpr auto HDDL_STREAM_ID = "HDDL_STREAM_ID";
static constexpr auto HDDL_DEVICE_TAG = "HDDL_DEVICE_TAG";
static constexpr auto HDDL_BIND_DEVICE = "HDDL_BIND_DEVICE";
static constexpr auto HDDL_RUNTIME_PRIORITY = "HDDL_RUNTIME_PRIORITY";
static constexpr auto HDDL_USE_SGAD = "HDDL_USE_SGAD";
static constexpr auto HDDL_GROUP_DEVICE = "HDDL_GROUP_DEVICE";
static constexpr auto MYRIAD_ENABLE_FORCE_RESET = "MYRIAD_ENABLE_FORCE_RESET";
static constexpr auto MYRIAD_DDR_TYPE = "MYRIAD_DDR_TYPE";
static constexpr auto MYRIAD_DDR_AUTO = "MYRIAD_DDR_AUTO";
static constexpr auto MYRIAD_DDR_MICRON_2GB = "MYRIAD_DDR_MICRON_2GB";
static constexpr auto MYRIAD_DDR_SAMSUNG_2GB = "MYRIAD_DDR_SAMSUNG_2GB";
static constexpr auto MYRIAD_DDR_HYNIX_2GB = "MYRIAD_DDR_HYNIX_2GB";
static constexpr auto MYRIAD_DDR_MICRON_1GB = "MYRIAD_DDR_MICRON_1GB";
static constexpr auto MYRIAD_PROTOCOL = "MYRIAD_PROTOCOL";
static constexpr auto MYRIAD_PCIE = "MYRIAD_PCIE";
static constexpr auto MYRIAD_USB = "MYRIAD_USB";
static constexpr auto MYRIAD_THROUGHPUT_STREAMS = "MYRIAD_THROUGHPUT_STREAMS";
static constexpr auto MYRIAD_THROUGHPUT_STREAMS_AUTO = "MYRIAD_THROUGHPUT_STREAMS_AUTO";
static constexpr auto MYRIAD_ENABLE_HW_ACCELERATION = "MYRIAD_ENABLE_HW_ACCELERATION";
static constexpr auto MYRIAD_ENABLE_RECEIVING_TENSOR_TIME = "MYRIAD_ENABLE_RECEIVING_TENSOR_TIME";
static constexpr auto MYRIAD_CUSTOM_LAYERS = "MYRIAD_CUSTOM_LAYERS";

// global functions

template <class T, class Ordering = std::function<details::OutInfoWrapper(CNNLayer*)>>
bool CNNNetDFS(
    const InferenceEngine::CNNLayerPtr& layer,
    const T& visit,
    bool visitBefore = true,
    const Ordering& order = &details::default_order
    );

template <class T>
bool CNNNetForestDFS(
    const std::vector<DataPtr>& heads,
    const T& visit,
    bool bVisitBefore
    );

template <class Forest, class T>
bool CNNNetForestDFS(
    const Forest& heads,
    const T& visit,
    bool bVisitBefore
    );

template <class Ordering, class Forest, class T>
bool CNNNetForestDFS(
    const Forest& heads,
    const T& visit,
    bool bVisitBefore,
    const Ordering& order
    );

template <class T>
void CNNNetBFS(
    const InferenceEngine::CNNLayerPtr& layer,
    const T& visit
    );

bool CNNNetHasPrevLayer(const InferenceEngine::CNNLayer * layer, int idx = 0);
CNNLayerSet CNNNetGetAllInputLayers(const CNNNetwork& network);
CNNLayerSet CNNNetGetAllInputLayers(ICNNNetwork * network);
CNNLayerSet CNNNetGetAllInputLayers(CNNLayer * layer);

template <class LayerOrdering>
std::vector<CNNLayerPtr> CNNNetSortTopologicallyEx(
    const CNNNetwork& network,
    LayerOrdering ordering
    );

template <class Copier>
CNNNetwork CNNNetCopy(
    const CNNNetwork& input,
    const Copier& cp
    );

CNNNetwork CNNNetCopy(const CNNNetwork& input);
CNNLayerWeakPtr& getCreatorLayer(const DataPtr& data);
std::map<std::string, CNNLayerPtr>& getInputTo(const DataPtr& data);
std::map<std::string, CNNLayerPtr>& getInputTo(Data * data);
Paddings getPaddingsImpl(const CNNLayer& layer);

template <class T>
std::enable_if<is_one_of<T, DeformableConvolutionLayer, DeconvolutionLayer, ConvolutionLayer, BinaryConvolutionLayer, PoolingLayer>::value, Paddings>::type getPaddings(const T& layer);

int getNumIteration(const TensorIterator& ti);
DataPtr cloneData(const Data& source);
CNNLayerPtr clonelayer(const CNNLayer& source);
InferenceEngine::details::CNNNetworkImplPtr cloneNet(const std::vector<InferenceEngine::CNNLayerPtr>& layers);
InferenceEngine::CNNNetwork cloneNetwork(const InferenceEngine::CNNNetwork& network);
InferenceEngine::details::CNNNetworkImplPtr cloneNet(const InferenceEngine::CNNNetwork& network);

void saveGraphToDot(
    const InferenceEngine::CNNNetwork& network,
    std::ostream& out,
    printer_callback layer_cb = nullptr
    );

template <class InjectType>
CNNLayerPtr injectData(
    const CNNLayer& sourceLayer,
    const InjectType& value = InjectType()
    );

template <class InjectType>
CNNLayerPtr injectData(
    CNNLayerPtr sourceLayer,
    const InjectType& value = InjectType()
    );

template <class Transformer>
void transformLayer(
    const CNNLayer& sourceLayer,
    const Transformer& transformer
    );

template <class Transformer>
void transformLayer(
    CNNLayerPtr sourceLayer,
    const Transformer& transformer
    );

template <class InjectType>
InjectType * getInjectedData(const CNNLayer& sourceLayer);

template <class InjectType>
InjectType * getInjectedData(CNNLayerPtr sourceLayer);

void CreatePreProcessData(std::shared_ptr<IPreProcessData>& data);
PreProcessDataPtr CreatePreprocDataHelper();
void blob_copy(Blob::Ptr src, Blob::Ptr dst);
PreProcessInfo copyPreProcess(const PreProcessInfo& from);

template <typename T>
std::map<std::string, std::shared_ptr<const T>> constMapCast(const std::map<std::string, std::shared_ptr<T>>& map);

template <typename T>
std::map<std::string, std::shared_ptr<T>> constMapCast(const std::map<std::string, std::shared_ptr<const T>>& map);

InputsDataMap copyInfo(const InputsDataMap& networkInputs);
OutputsDataMap copyInfo(const OutputsDataMap& networkOutputs);

void SetExeNetworkInfo(
    const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
    const std::shared_ptr<const ov::Model>& function,
    bool new_api
    );

std::unordered_set<std::string> GetRemovedNodes(
    const std::shared_ptr<const ov::Model>& originalFunction,
    const std::shared_ptr<const ov::Model>& transformedFunction
    );

std::unordered_set<std::string> GetSupportedNodes(
    const std::shared_ptr<const ov::Model>& model,
    std::function<void(std::shared_ptr<ov::Model>&)> transform,
    std::function<bool(const std::shared_ptr<ngraph::Node>)> is_node_supported
    );

std::string getIELibraryPath();
inline ::ov::util::FilePath getInferenceEngineLibraryPath();
bool checkOpenMpEnvVars(bool includeOMPNumThreads = true);
std::vector<int> getAvailableNUMANodes();
std::vector<int> getAvailableCoresTypes();
int getNumberOfCPUCores(bool bigCoresOnly = false);
int getNumberOfLogicalCPUCores(bool bigCoresOnly = false);
bool with_cpu_x86_sse42();
bool with_cpu_x86_avx();
bool with_cpu_x86_avx2();
bool with_cpu_x86_avx512f();
bool with_cpu_x86_avx512_core();
bool with_cpu_x86_avx512_core_vnni();
bool with_cpu_x86_bfloat16();
bool with_cpu_x86_avx512_core_amx_int8();
bool with_cpu_x86_avx512_core_amx_bf16();
bool with_cpu_x86_avx512_core_amx();
ExecutorManager::Ptr executorManager();
std::shared_ptr<InferenceEngine::IAllocator> CreateDefaultAllocator();

template <
    typename T,
    typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
    typename std::enable_if<std::is_base_of<Blob, T>::value, int>::type = 0
    >
std::shared_ptr<T> as(const Blob::Ptr& blob);

template <
    typename T,
    typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
    typename std::enable_if<std::is_base_of<Blob, T>::value, int>::type = 0
    >
std::shared_ptr<const T> as(const Blob::CPtr& blob);

template <typename Type>
InferenceEngine::TBlob<Type>::Ptr make_shared_blob(const TensorDesc& tensorDesc);

template <typename Type>
InferenceEngine::TBlob<Type>::Ptr make_shared_blob(
    const TensorDesc& tensorDesc,
    Type * ptr,
    size_t size = 0
    );

template <typename Type>
InferenceEngine::TBlob<Type>::Ptr make_shared_blob(
    const TensorDesc& tensorDesc,
    const std::shared_ptr<InferenceEngine::IAllocator>& alloc
    );

template <typename TypeTo>
InferenceEngine::TBlob<TypeTo>::Ptr make_shared_blob(const TBlob<TypeTo>& arg);

template <
    typename T,
    typename... Args,
    typename std::enable_if<std::is_base_of<Blob, T>::value, int>::type = 0
    >
std::shared_ptr<T> make_shared_blob(Args&&... args);

Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi);

Blob::Ptr make_shared_blob(
    const Blob::Ptr& inputBlob,
    const std::vector<size_t>& begin,
    const std::vector<size_t>& end
    );

std::ostream& operator << (std::ostream& out, const Layout& p);
std::ostream& operator << (std::ostream& out, const ColorFormat& fmt);
void shutdown();

template <typename T = IExtension>
std::shared_ptr<T> make_so_pointer(const std::string& name);

void CreateExtensionShared(IExtensionPtr& ext);
StatusCode CreateExtension(IExtension *& ext, ResponseDesc * resp);

TensorDesc make_roi_desc(
    const TensorDesc& origDesc,
    const ROI& roi,
    bool useOrigMemDesc
    );

TensorDesc make_roi_desc(
    const TensorDesc& origDesc,
    const std::vector<size_t>& begin,
    const std::vector<size_t>& end,
    bool useOrigMemDesc
    );

RemoteBlob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx);
void LowLatency(InferenceEngine::CNNNetwork& network);

void lowLatency2(
    InferenceEngine::CNNNetwork& network,
    bool use_const_initializer = true
    );

const Version * GetInferenceEngineVersion();

} // namespace InferenceEngine

Detailed Documentation

Inference Engine Plugin API namespace.

Inference Engine C++ API.

Typedefs

typedef CNNLayer GenericLayer

Alias for CNNLayer object.

typedef std::shared_ptr<CNNLayer> CNNLayerPtr

A smart pointer to the CNNLayer.

typedef std::weak_ptr<CNNLayer> CNNLayerWeakPtr

A smart weak pointer to the CNNLayer.

typedef ov::SoPtr<IExecutableNetworkInternal> SoExecutableNetworkInternal

SoPtr to IExecutableNetworkInternal.

typedef ov::SoPtr<IInferRequestInternal> SoIInferRequestInternal

SoPtr to IInferRequestInternal.

typedef IVariableStateInternal IMemoryStateInternal

For compatibility reasons.

typedef ov::SoPtr<IVariableStateInternal> SoIVariableStateInternal

SoPtr to IVariableStateInternal.

typedef IVariableStateInternal MemoryStateInternal

For compatibility reasons.

typedef VariableState MemoryState

For compatibility reasons.

typedef void * gpu_handle_param

Shortcut for defining a handle parameter.

typedef std::map<std::string, Blob::Ptr> BlobMap

This is a convenient type for working with a map containing pairs (string, pointer to a Blob instance).

typedef std::vector<size_t> SizeVector

Represents tensor size.

The order is opposite to the order in Caffe*: (w,h,n,b) where the most frequently changing element in memory is first.

typedef std::shared_ptr<Data> DataPtr

Smart pointer to Data.

typedef std::shared_ptr<const Data> CDataPtr

Smart pointer to constant Data.

typedef std::weak_ptr<Data> DataWeakPtr

Smart weak pointer to Data.

typedef std::map<std::string, CDataPtr> ConstOutputsDataMap

A collection that contains string as key, and const Data smart pointer as value.

typedef std::map<std::string, DataPtr> OutputsDataMap

A collection that contains string as key, and Data smart pointer as value.

typedef std::shared_ptr<IExtension> IExtensionPtr

A shared pointer to a IExtension interface.

typedef std::map<std::string, InputInfo::Ptr> InputsDataMap

A collection that contains string as key, and InputInfo smart pointer as value.

typedef std::map<std::string, InputInfo::CPtr> ConstInputsDataMap

A collection that contains string as key, and const InputInfo smart pointer as value.

typedef ov::Any Parameter

Alias for type that can store any value.
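As an illustration of how these typedefs fit together, the following minimal sketch builds a blob and registers it in a BlobMap; the input name "data" and the tensor shape are assumptions for the example, not part of the API:

#include <ie_core.hpp>

using namespace InferenceEngine;

int main() {
    // SizeVector describes the tensor dimensions.
    SizeVector dims = {1, 3, 224, 224};

    // TensorDesc combines precision, dimensions, and layout.
    TensorDesc desc(Precision::FP32, dims, Layout::NCHW);

    // Blob::Ptr is a shared pointer to a Blob; BlobMap maps input names to blobs.
    Blob::Ptr input = make_shared_blob<float>(desc);
    input->allocate();

    BlobMap inputs;
    inputs["data"] = input;  // "data" is a hypothetical input name
    return 0;
}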

Global Variables

static constexpr auto HDDL_GRAPH_TAG = "HDDL_GRAPH_TAG"

[Only for OpenVINO Intel HDDL device] Type: arbitrary non-empty string. If empty (“”), treated as not set; default: “”. This option allows specifying the number of MYX devices used for inference of a specific executable network. Note: only one network is allocated to one device. The number of devices for the tag is specified in the hddl_service.config file. Example: “service_settings”: { “graph_tag_map”: { “tagA”: 3 } } means that an executable network marked with tagA will be executed on 3 devices.

static constexpr auto HDDL_STREAM_ID = "HDDL_STREAM_ID"

[Only for OpenVINO Intel HDDL device] Type: arbitrary non-empty string. If empty (“”), treated as not set; default: “”. This config makes the executable network be allocated on one specific device (instead of multiple devices), and all inference through this executable network is done on that device. Note: only one network is allocated to one device. The number of devices to be used for stream affinity must be specified in the hddl_service.config file. Example: “service_settings”: { “stream_device_number”: 5 } means that 5 devices will be used for stream affinity.

static constexpr auto HDDL_DEVICE_TAG = "HDDL_DEVICE_TAG"

[Only for OpenVINO Intel HDDL device] Type: arbitrary non-empty string. If empty (“”), treated as not set; default: “”. This config allows the user to control devices flexibly. It assigns a “tag” to a device when a network is allocated to it; afterwards, the user can allocate/deallocate networks to that device using this “tag”. Devices used for this purpose are controlled by the so-called “Bypass Scheduler” in the HDDL backend, and the number of such devices must be specified in the hddl_service.config file. Example: “service_settings”: { “bypass_device_number”: 5 } means that 5 devices will be used by the Bypass scheduler.

static constexpr auto HDDL_BIND_DEVICE = "HDDL_BIND_DEVICE"

[Only for OpenVINO Intel HDDL device] Type: “YES/NO”, default is “NO”. This config is a sub-config of DEVICE_TAG and is only available when “DEVICE_TAG” is set. After a user loads a network, the user gets a handle for it. If “YES”, the allocated network is bound to the device (with the specified “DEVICE_TAG”), which means all subsequent inference through this network handle will be executed on this device only. If “NO”, the allocated network is not bound to the device (with the specified “DEVICE_TAG”); if the same network is also allocated on other devices (also with BIND_DEVICE set to “False”), inference through any handle of these networks may be executed on any of the devices that have the network loaded.

static constexpr auto HDDL_RUNTIME_PRIORITY = "HDDL_RUNTIME_PRIORITY"

[Only for OpenVINO Intel HDDL device] Type: a signed int wrapped in a string, default is “0”. This config is a sub-config of DEVICE_TAG and is only available when “DEVICE_TAG” is set and “BIND_DEVICE” is “False”. When multiple devices run the same network (the same network running on multiple devices under the Bypass Scheduler), a device with a larger number has a higher priority and will be fed more inference tasks.

static constexpr auto HDDL_USE_SGAD = "HDDL_USE_SGAD"

[Only for OpenVINO Intel HDDL device] Type: “YES/NO”, default is “NO”. SGAD is short for “Single Graph All Device”. With this scheduler, once an application allocates one network, all devices (managed by the SGAD scheduler) are loaded with this graph. The number of networks loaded onto one device can exceed one. Once the application deallocates a network, all devices unload it.

static constexpr auto HDDL_GROUP_DEVICE = "HDDL_GROUP_DEVICE"

[Only for OpenVINO Intel HDDL device] Type: a signed int wrapped in a string, default is “0”. This config assigns a “group id” to a device when it has been reserved for a certain client; that client can use the device by specifying the group id, while other clients cannot. Each device has its own group id, and devices in one group share the same group id.
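The HDDL keys above are passed as plain string entries in the configuration map given to Core::LoadNetwork. A minimal sketch, assuming an HDDL device is available; model.xml is a placeholder model path, the values are illustrative, and the header location for the HDDL_* keys is an assumption:

#include <map>
#include <string>

#include <ie_core.hpp>
#include <vpu/hddl_config.hpp>  // assumed location of the HDDL_* configuration keys

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder model path

    // Tag the graph and bind it to the tagged device; all values are illustrative.
    std::map<std::string, std::string> config = {
        {InferenceEngine::HDDL_GRAPH_TAG, "tagA"},
        {InferenceEngine::HDDL_DEVICE_TAG, "tagA"},
        {InferenceEngine::HDDL_BIND_DEVICE, "YES"},
    };

    auto executableNetwork = core.LoadNetwork(network, "HDDL", config);
    return 0;
}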

static constexpr auto MYRIAD_ENABLE_FORCE_RESET = "MYRIAD_ENABLE_FORCE_RESET"

The flag to reset stalled devices. This is a plugin-scope option and must be used with the plugin’s SetConfig method. The only possible values are: CONFIG_VALUE(YES), CONFIG_VALUE(NO) (default value).

static constexpr auto MYRIAD_DDR_TYPE = "MYRIAD_DDR_TYPE"

This option allows specifying the device memory type.

static constexpr auto MYRIAD_DDR_AUTO = "MYRIAD_DDR_AUTO"

Supported keys definition for InferenceEngine::MYRIAD_DDR_TYPE option.

static constexpr auto MYRIAD_PROTOCOL = "MYRIAD_PROTOCOL"

This option allows specifying the protocol.

static constexpr auto MYRIAD_PCIE = "MYRIAD_PCIE"

Supported keys definition for InferenceEngine::MYRIAD_PROTOCOL option.

static constexpr auto MYRIAD_THROUGHPUT_STREAMS = "MYRIAD_THROUGHPUT_STREAMS"

Optimizes VPU plugin execution to maximize throughput. This option should be used with an integer value, which is the requested number of streams. The only possible values are: 1, 2, 3.

static constexpr auto MYRIAD_THROUGHPUT_STREAMS_AUTO = "MYRIAD_THROUGHPUT_STREAMS_AUTO"

Default key definition for InferenceEngine::MYRIAD_THROUGHPUT_STREAMS option.

static constexpr auto MYRIAD_ENABLE_HW_ACCELERATION = "MYRIAD_ENABLE_HW_ACCELERATION"

Turns on HW stages usage (applicable for MyriadX devices only). The only possible values are: CONFIG_VALUE(YES) (default value), CONFIG_VALUE(NO).

static constexpr auto MYRIAD_ENABLE_RECEIVING_TENSOR_TIME = "MYRIAD_ENABLE_RECEIVING_TENSOR_TIME"

The flag for adding the time of obtaining a tensor to the profiling information. The only possible values are: CONFIG_VALUE(YES), CONFIG_VALUE(NO) (default value).

static constexpr auto MYRIAD_CUSTOM_LAYERS = "MYRIAD_CUSTOM_LAYERS"

This option allows passing a custom layer binding XML. If a layer is present in such an XML, it will be used during inference even if the layer is natively supported.
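The MYRIAD keys follow the same pattern: the constants hold the literal key strings and the values are plain strings. A hedged sketch with illustrative values; the model path and the header locations are assumptions:

#include <map>
#include <string>

#include <ie_core.hpp>
#include <ie_plugin_config.hpp>
#include <vpu/myriad_config.hpp>  // assumed location of the MYRIAD_* configuration keys

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder model path

    std::map<std::string, std::string> config = {
        {InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB},
        {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, InferenceEngine::PluginConfigParams::YES},
        {InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "2"},
    };

    auto executableNetwork = core.LoadNetwork(network, "MYRIAD", config);
    return 0;
}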

Global Functions

template <class T, class Ordering = std::function<details::OutInfoWrapper(CNNLayer*)>>
bool CNNNetDFS(
    const InferenceEngine::CNNLayerPtr& layer,
    const T& visit,
    bool visitBefore = true,
    const Ordering& order = &details::default_order
    )

Generic DFS algorithm traverser

Parameters:

layer

  • starting layer

visit

  • callback to be called upon visiting

visitBefore

  • indicates whether the callback is called before visiting child nodes or after
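A minimal sketch of a visitor for this traversal, assuming the visitor is invoked with a CNNLayerPtr for each visited layer and that the helper is available through the legacy graph tools header:

#include <string>
#include <vector>

#include <legacy/graph_tools.hpp>  // assumed header providing CNNNetDFS; location differs between releases

// Collects the names of all layers reachable from "start" in DFS order.
std::vector<std::string> CollectLayerNames(const InferenceEngine::CNNLayerPtr& start) {
    std::vector<std::string> names;
    InferenceEngine::CNNNetDFS(start, [&](const InferenceEngine::CNNLayerPtr& layer) {
        names.push_back(layer->name);  // CNNLayer::name holds the layer name
    });
    return names;
}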

template <class T>
bool CNNNetForestDFS(
    const std::vector<DataPtr>& heads,
    const T& visit,
    bool bVisitBefore
    )

DFS algorithm with multiple starting data

Parameters:

heads

  • starting data objects

visit

  • callback to be called upon visiting

visitBefore

  • indicates whether the callback is called before visiting child nodes or after

template <class Forest, class T>
bool CNNNetForestDFS(
    const Forest& heads,
    const T& visit,
    bool bVisitBefore
    )

DFS algorithm with multiple starting nodes

Parameters:

heads

  • starting nodes

visit

  • callback to be called upon visiting

visitBefore

  • indicates whether the callback is called before visiting child nodes or after

template <class Ordering, class Forest, class T>
bool CNNNetForestDFS(
    const Forest& heads,
    const T& visit,
    bool bVisitBefore,
    const Ordering& order
    )

DFS algorithm with multiple starting nodes

Parameters:

heads

  • starting nodes

visit

  • callback to be called upon visiting

visitBefore

  • indicates whether the callback is called before visiting child nodes or after

template <class T>
void CNNNetBFS(
    const InferenceEngine::CNNLayerPtr& layer,
    const T& visit
    )

Generic BFS algorithm traverser

Parameters:

layer

  • starting layer

visit

  • callback to be called upon visiting

bool CNNNetHasPrevLayer(const InferenceEngine::CNNLayer * layer, int idx = 0)

checks whether the layer has a previous layer at the given index

Parameters:

idx

  • index in the previous layer collection

layer

  • layer to check

CNNLayerSet CNNNetGetAllInputLayers(const CNNNetwork& network)

returns all layers that are input or memory

Parameters:

network

Returns:

set of input layers

CNNLayerSet CNNNetGetAllInputLayers(CNNLayer * layer)

returns all layers that are input or memory; the search starts from an arbitrary location in the network

Parameters:

layer

  • start layer

Returns:

set of input layers

template <class LayerOrdering>
std::vector<CNNLayerPtr> CNNNetSortTopologicallyEx(
    const CNNNetwork& network,
    LayerOrdering ordering
    )

Sorts the CNNNetwork graph in topological order, using a custom ordering when walking among child nodes.

Parameters:

network

ordering

  • callback that returns output layers for given CNNLayer pointer, see default_order function

Returns:

sorted CNNNetwork layers

template <class Copier>
CNNNetwork CNNNetCopy(
    const CNNNetwork& input,
    const Copier& cp
    )

deep copy of the entire network structure, using a custom copier for layers

Parameters:

input

  • source network

cp

  • custom copier object, ex: [](CNNLayerPtr lp) { return injectData<EmptyStruct>(lp); }

Returns:

copied network

CNNNetwork CNNNetCopy(const CNNNetwork& input)

deep copy of the entire network

Parameters:

input

Returns:

copied network

Paddings getPaddingsImpl(const CNNLayer& layer)

gets padding with runtime type check

template <class T>
std::enable_if<is_one_of<T, DeformableConvolutionLayer, DeconvolutionLayer, ConvolutionLayer, BinaryConvolutionLayer, PoolingLayer>::value, Paddings>::type getPaddings(const T& layer)

gets padding with compile-time type check

int getNumIteration(const TensorIterator& ti)

Calculate number of iteration required for provided TI layer.

Parameters:

ti

TensorIterator layer to parse

Returns:

positive value in case of correct TI layer, -1 in case of inconsistency

DataPtr cloneData(const Data& source)

Creates data object copy unconnected to any graph.

Parameters:

source

  • source data object

Returns:

Shared pointer to new data object

CNNLayerPtr clonelayer(const CNNLayer& source)

Creates layer object copy, unconnected to any graph.

Parameters:

source

  • source layer object

Returns:

Shared pointer to new layer object

InferenceEngine::details::CNNNetworkImplPtr cloneNet(const std::vector<InferenceEngine::CNNLayerPtr>& layers)

Clones a selected set of nodes into a separate network; only connections between the passed nodes will be duplicated.

Parameters:

layers

Layers to clone, must all be in same network

Returns:

Cloned network

InferenceEngine::CNNNetwork cloneNetwork(const InferenceEngine::CNNNetwork& network)

Clones the whole network without conversion to CNNNetworkImpl. All layers and data objects will be cloned.

Blobs inside layers are reused

Parameters:

network

A network to clone

Returns:

A cloned object

InferenceEngine::details::CNNNetworkImplPtr cloneNet(const InferenceEngine::CNNNetwork& network)

Clones the whole network. All layers and data objects will be cloned.

Blobs inside layers are reused

Parameters:

network

A network to clone

Returns:

A cloned object

void saveGraphToDot(
    const InferenceEngine::CNNNetwork& network,
    std::ostream& out,
    printer_callback layer_cb = nullptr
    )

Visualize network in GraphViz (.dot) format and write to output stream.

Parameters:

network

  • graph to visualize

out

  • output stream for saving graph

layer_cb

  • callback function that is called on every printed layer node
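A short sketch of dumping a network to a .dot file with a printer callback that attaches the layer type as an extra property; the output file name and the header location are assumptions:

#include <fstream>

#include <legacy/ie_util_internal.hpp>  // assumed header providing saveGraphToDot; location differs between releases

// Writes the network graph to "network.dot", printing the layer type as an extra property.
void DumpNetwork(const InferenceEngine::CNNNetwork& network) {
    std::ofstream out("network.dot");  // arbitrary output file name
    InferenceEngine::saveGraphToDot(
        network, out,
        [](const InferenceEngine::CNNLayerPtr layer,
           InferenceEngine::ordered_properties& printed_properties,
           InferenceEngine::ordered_properties& node_properties) {
            printed_properties.push_back({"type", layer->type});
        });
}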

template <class InjectType>
CNNLayerPtr injectData(
    const CNNLayer& sourceLayer,
    const InjectType& value = InjectType()
    )

creates a copy of the source layer with arbitrary data injected

Parameters:

InjectType

data type to be injected

sourceLayer

value

injected value

Returns:

newly created layer with injected data

template <class Transformer>
void transformLayer(
    const CNNLayer& sourceLayer,
    const Transformer& transformer
    )

applies the given transformer to the source layer

Parameters:

Transformer

transformation functor type

sourceLayer

layer to transform

transformer

transformation functor to apply

template <class InjectType>
InjectType * getInjectedData(const CNNLayer& sourceLayer)

gets pointer to injected data

Parameters:

InjectType

sourceLayer

Returns:

if previously data of type InjectType was injected, will return pointer to it, nullptr otherwise
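A minimal sketch of the inject/retrieve pair; MyMeta is a hypothetical payload type used only for illustration, and the header location is an assumption:

#include <legacy/ie_util_internal.hpp>  // assumed header providing injectData/getInjectedData

struct MyMeta {  // hypothetical payload type, not part of the API
    int executionOrder;
};

void TagLayer(const InferenceEngine::CNNLayerPtr& source) {
    MyMeta value;
    value.executionOrder = 42;

    // injectData returns a newly created copy of the layer carrying the MyMeta value.
    InferenceEngine::CNNLayerPtr tagged = InferenceEngine::injectData<MyMeta>(source, value);

    // getInjectedData returns a pointer to the stored MyMeta, or nullptr if nothing was injected.
    if (MyMeta* meta = InferenceEngine::getInjectedData<MyMeta>(tagged)) {
        meta->executionOrder += 1;
    }
}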

PreProcessInfo copyPreProcess(const PreProcessInfo& from)

Copies preprocess info.

Parameters:

from

PreProcessInfo to copy from

Returns:

copy of preprocess info

template <typename T>
std::map<std::string, std::shared_ptr<const T>> constMapCast(const std::map<std::string, std::shared_ptr<T>>& map)

Copies the values of a std::string-indexed map and applies a const cast.

Parameters:

map

map to copy

Returns:

map that contains pointers to constant values

template <typename T>
std::map<std::string, std::shared_ptr<T>> constMapCast(const std::map<std::string, std::shared_ptr<const T>>& map)

Copies the values of a std::string-indexed map and applies a const cast.

Parameters:

map

map to copy

Returns:

map that contains pointers to values

InputsDataMap copyInfo(const InputsDataMap& networkInputs)

Copies InputInfo.

Parameters:

networkInputs

The network inputs to copy from

Returns:

copy of network inputs

OutputsDataMap copyInfo(const OutputsDataMap& networkOutputs)

Copies OutputsData.

Parameters:

networkOutputs

network outputs to copy from

Returns:

copy of network outputs

void SetExeNetworkInfo(
    const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
    const std::shared_ptr<const ov::Model>& function,
    bool new_api
    )

Sets input and output information for the executable network. This method is used to set additional information in InferenceEngine::IExecutableNetworkInternal created by a device plugin.

Parameters:

exeNetwork

Executable network object

function

Model with initial execution info

std::unordered_set<std::string> GetRemovedNodes(
    const std::shared_ptr<const ov::Model>& originalFunction,
    const std::shared_ptr<const ov::Model>& transformedFunction
    )

Returns the set of nodes which were removed after transformation. If originalFunction contains node1 and transformedFunction does not contain node1 in its ops list, node1 will be returned.

Parameters:

originalFunction

Original network

transformedFunction

Transformed network

Returns:

Set of strings which contains removed node names

std::unordered_set<std::string> GetSupportedNodes(
    const std::shared_ptr<const ov::Model>& model,
    std::function<void(std::shared_ptr<ov::Model>&)> transform,
    std::function<bool(const std::shared_ptr<ngraph::Node>)> is_node_supported
    )

Returns set of nodes from original model which are determined as supported after applied transformation pipeline.

Parameters:

model

Original model

transform

Transformation pipeline function

is_node_supported

Function returning whether node is supported or not

Returns:

Set of strings which contains supported node names
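A sketch of how a plugin might use this helper; the transformation pipeline (constant folding only), the list of supported operation types, and the header declaring GetSupportedNodes are illustrative assumptions:

#include <memory>
#include <string>
#include <unordered_set>

#include <openvino/pass/constant_folding.hpp>
#include <openvino/pass/manager.hpp>
#include <cpp_interfaces/interface/ie_iplugin_internal.hpp>  // assumed header declaring GetSupportedNodes

std::unordered_set<std::string> QuerySupported(const std::shared_ptr<const ov::Model>& model) {
    return InferenceEngine::GetSupportedNodes(
        model,
        // Transformation pipeline applied to a clone of the model; constant folding is illustrative only.
        [](std::shared_ptr<ov::Model>& m) {
            ov::pass::Manager manager;
            manager.register_pass<ov::pass::ConstantFolding>();
            manager.run_passes(m);
        },
        // Per-node predicate; the supported type list is illustrative only.
        [](const std::shared_ptr<ngraph::Node> node) {
            const std::string type = node->get_type_name();
            return type == "Parameter" || type == "Result" || type == "Relu" || type == "Convolution";
        });
}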

std::shared_ptr<InferenceEngine::IAllocator> CreateDefaultAllocator()

Creates the default implementation of the Inference Engine allocator per plugin.

Returns:

The Inference Engine IAllocator* instance

template <
    typename T,
    typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
    typename std::enable_if<std::is_base_of<Blob, T>::value, int>::type = 0
    >
std::shared_ptr<T> as(const Blob::Ptr& blob)

Helper cast function to work with shared Blob objects.

Parameters:

blob

A blob to cast

Returns:

shared_ptr to the type T. Returned shared_ptr shares ownership of the object with the input Blob::Ptr
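A small sketch of the cast helper combined with MemoryBlob mapped access; FP32 content is assumed for illustration:

#include <ie_blob.h>

// Reads the first element of a blob, assuming FP32 content (for illustration only).
float ReadFirstElement(const InferenceEngine::Blob::Ptr& blob) {
    // Cast the generic Blob to MemoryBlob; the result shares ownership with "blob".
    if (auto mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob)) {
        auto mapped = mblob->rmap();                    // read-only mapping of the blob memory
        const float* data = mapped.as<const float*>();
        if (data) {
            return data[0];
        }
    }
    return 0.0f;
}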

template <
    typename T,
    typename std::enable_if<!std::is_pointer<T>::value && !std::is_reference<T>::value, int>::type = 0,
    typename std::enable_if<std::is_base_of<Blob, T>::value, int>::type = 0
    >
std::shared_ptr<const T> as(const Blob::CPtr& blob)

Helper cast function to work with shared Blob objects.

Parameters:

blob

A blob to cast

Returns:

shared_ptr to the type const T. Returned shared_ptr shares ownership of the object with the input Blob::Ptr

template <typename Type>
InferenceEngine::TBlob<Type>::Ptr make_shared_blob(const TensorDesc& tensorDesc)

Creates a blob with the given tensor descriptor.

Parameters:

Type

Type of the shared pointer to be created

tensorDesc

Tensor descriptor for Blob creation

Returns:

A shared pointer to the newly created blob of the given type

template <typename Type>
InferenceEngine::TBlob<Type>::Ptr make_shared_blob(
    const TensorDesc& tensorDesc,
    Type * ptr,
    size_t size = 0
    )

Creates a blob with the given tensor descriptor from the pointer to the pre-allocated memory.

Parameters:

Type

Type of the shared pointer to be created

tensorDesc

TensorDesc for Blob creation

ptr

Pointer to the pre-allocated memory

size

Length of the pre-allocated array

Returns:

A shared pointer to the newly created blob of the given type
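A minimal sketch wrapping caller-owned memory; the blob does not take ownership, so the buffer must outlive it:

#include <vector>

#include <ie_blob.h>

int main() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 2, 2},
                                     InferenceEngine::Layout::NCHW);

    // Pre-allocated memory owned by the caller; it must outlive the blob.
    std::vector<float> buffer(1 * 3 * 2 * 2, 0.0f);

    // The blob wraps "buffer" without copying or taking ownership.
    auto blob = InferenceEngine::make_shared_blob<float>(desc, buffer.data(), buffer.size());
    return 0;
}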

template <typename Type>
InferenceEngine::TBlob<Type>::Ptr make_shared_blob(
    const TensorDesc& tensorDesc,
    const std::shared_ptr<InferenceEngine::IAllocator>& alloc
    )

Creates a blob with the given tensor descriptor and allocator.

Parameters:

Type

Type of the shared pointer to be created

tensorDesc

Tensor descriptor for Blob creation

alloc

Shared pointer to IAllocator to use in the blob

Returns:

A shared pointer to the newly created blob of the given type
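This overload can be paired with CreateDefaultAllocator() described above among the global functions; a minimal sketch (the header declaring CreateDefaultAllocator is an assumption):

#include <ie_blob.h>
#include <system_allocator.hpp>  // assumed dev-API header declaring CreateDefaultAllocator

int main() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 2, 2},
                                     InferenceEngine::Layout::NCHW);

    // Back the blob's memory with the default Inference Engine allocator.
    std::shared_ptr<InferenceEngine::IAllocator> allocator = InferenceEngine::CreateDefaultAllocator();
    auto blob = InferenceEngine::make_shared_blob<float>(desc, allocator);
    blob->allocate();
    return 0;
}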

template <typename TypeTo>
InferenceEngine::TBlob<TypeTo>::Ptr make_shared_blob(const TBlob<TypeTo>& arg)

Creates a copy of given TBlob instance.

Parameters:

TypeTo

Type of the shared pointer to be created

arg

given blob to copy

Returns:

A shared pointer to the newly created blob of the given type

template <
    typename T,
    typename... Args,
    typename std::enable_if<std::is_base_of<Blob, T>::value, int>::type = 0
    >
std::shared_ptr<T> make_shared_blob(Args&&... args)

Creates a Blob object of the specified type.

Parameters:

args

Constructor arguments for the Blob object

Returns:

A shared pointer to the newly created Blob object

Blob::Ptr make_shared_blob(const Blob::Ptr& inputBlob, const ROI& roi)

Creates a blob describing given ROI object based on the given blob with pre-allocated memory.

Parameters:

inputBlob

original blob with pre-allocated memory.

roi

A ROI object inside of the original blob.

Returns:

A shared pointer to the newly created blob.
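A sketch of creating a 2D ROI view over an existing image blob; the ROI coordinates and the blob shape are arbitrary:

#include <cstdint>

#include <ie_blob.h>

int main() {
    InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, {1, 3, 480, 640},
                                     InferenceEngine::Layout::NCHW);
    InferenceEngine::Blob::Ptr image = InferenceEngine::make_shared_blob<uint8_t>(desc);
    image->allocate();

    // ROI fields: id, posX, posY, sizeX, sizeY; the coordinates are arbitrary.
    InferenceEngine::ROI roi{0, 32, 16, 224, 224};

    // The ROI blob shares memory with the original blob.
    InferenceEngine::Blob::Ptr cropped = InferenceEngine::make_shared_blob(image, roi);
    return 0;
}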

Blob::Ptr make_shared_blob(
    const Blob::Ptr& inputBlob,
    const std::vector<size_t>& begin,
    const std::vector<size_t>& end
    )

Creates a blob describing given ROI object based on the given blob with pre-allocated memory.

Parameters:

inputBlob

original blob with pre-allocated memory.

begin

A ROI object start coordinate inside of the original blob.

end

A ROI object end coordinate inside of the original blob.

Returns:

A shared pointer to the newly created blob.

std::ostream& operator << (std::ostream& out, const Layout& p)

Prints a string representation of InferenceEngine::Layout to a stream.

Parameters:

out

An output stream to send to

p

A layout value to print to a stream

Returns:

A reference to the out stream

std::ostream& operator << (std::ostream& out, const ColorFormat& fmt)

Prints a string representation of InferenceEngine::ColorFormat to a stream.

Parameters:

out

An output stream to send to

fmt

A color format value to print to a stream

Returns:

A reference to the out stream

void shutdown()

Shuts down OpenVINO by deleting all static-duration objects allocated by the library and releasing dependent resources.

This function should be used by advanced users to control unloading of resources.

You might want to use this function if you are developing a dynamically-loaded library which should clean up all resources after itself when the library is unloaded.

template <typename T = IExtension>
std::shared_ptr<T> make_so_pointer(const std::string& name)

Creates extension using deprecated API.

Parameters:

T

extension type

name

extension library name

Returns:

shared pointer to extension

void CreateExtensionShared(IExtensionPtr& ext)

Creates the default instance of the extension.

Parameters:

ext

Extension interface

StatusCode CreateExtension(IExtension *& ext, ResponseDesc * resp)

Creates the default instance of the extension.

Deprecated API.

Parameters:

ext

Extension interface

resp

Response

Returns:

InferenceEngine::OK if extension is constructed and InferenceEngine::GENERAL_ERROR otherwise

TensorDesc make_roi_desc(
    const TensorDesc& origDesc,
    const ROI& roi,
    bool useOrigMemDesc
    )

Creates a TensorDesc object for ROI.

Parameters:

origDesc

original TensorDesc object.

roi

An image ROI object inside of the original object.

useOrigMemDesc

Flag to use original memory description (strides/offset). Should be set if the new TensorDesc describes shared memory.

Returns:

A newly created TensorDesc object representing ROI.

TensorDesc make_roi_desc(
    const TensorDesc& origDesc,
    const std::vector<size_t>& begin,
    const std::vector<size_t>& end,
    bool useOrigMemDesc
    )

Creates a TensorDesc object for ROI.

Parameters:

origDesc

original TensorDesc object.

begin

start coordinate of ROI object inside of the original object.

end

end coordinate of ROI object inside of the original object.

useOrigMemDesc

Flag to use original memory description (strides/offset). Should be set if the new TensorDesc describes shared memory.

Returns:

A newly created TensorDesc object representing ROI.

RemoteBlob::Ptr make_shared_blob(const TensorDesc& desc, RemoteContext::Ptr ctx)

A wrapper of CreateBlob method of RemoteContext to keep consistency with plugin-specific wrappers.

Parameters:

desc

Defines the layout and dims of the blob

ctx

Pointer to the plugin object derived from RemoteContext.

Returns:

A pointer to plugin object that implements RemoteBlob interface.

void LowLatency(InferenceEngine::CNNNetwork& network)

The transformation finds all TensorIterator layers in the network, processes all back edges that describe a connection between Result and Parameter of the TensorIterator body, and inserts ReadValue layer between Parameter and the next layers after this Parameter, and Assign layer after the layers before the Result layer. Supported platforms: CPU, GNA.

Deprecated Use InferenceEngine::lowLatency2 instead. This transformation will be removed in 2023.1.

The example below describes the changes to the inner part (body, back edges) of the TensorIterator layer. [] - TensorIterator body () - new layer

before applying the transformation: back_edge_1 -> [Parameter -> some layers … -> Result ] -> back_edge_1

after applying the transformation: back_edge_1 -> [Parameter -> (ReadValue layer) -> some layers … -> (Assign layer) -> Result ] -> back_edge_1

It is recommended to use this transformation in conjunction with the Reshape feature to set the sequence dimension to 1 and with the UnrollTensorIterator transformation. For convenience, the UnrollTensorIterator transformation is already executed unconditionally when the LowLatency transformation is used for the CPU and GNA plugins, so no action is required here. After applying both of these transformations, the resulting network can be inferred step by step, and the states will be stored between inferences.

An illustrative example, not real API:

network->reshape(…)  // Set sequence dimension to 1, recalculating shapes. Optional, depends on the network.
LowLatency(network)  // Applying LowLatency and UnrollTensorIterator transformations.
network->infer(…)    // Calculating new values for states. All states are stored between inferences via Assign, ReadValue layers.
network->infer(…)    // Using stored states, calculating new values for states.

Parameters:

network

A network to apply LowLatency transformation

void lowLatency2(
    InferenceEngine::CNNNetwork& network,
    bool use_const_initializer = true
    )

The transformation finds all TensorIterator/Loop layers in the network, processes all back edges that describe a connection between Result and Parameter of the TensorIterator/Loop bodies, and inserts ReadValue and Assign layers at the input and output corresponding to this back edge. Supported platforms: CPU, GNA.

The example below describes the changes made by the transformation. [] - TensorIterator body, () - new layer, BE - back-edge.

before applying the transformation: -> input1[BE_1 -> Parameter -> Layers … -> Result -> BE_1 ]output1->

after applying the transformation: ->(ReadValue)-> input1[BE_1 -> Parameter -> Layers … -> Result -> BE_1]output1 ->(Assign) ->… After applying the transformation, the resulting network can be inferred step by step, and the states will be stored between inferences.

Parameters:

network

A network to apply LowLatency transformation

use_const_initializer

Changes the type of the initializing subgraph for ReadValue operations. If “true”, the transformation inserts a Constant before the ReadValue operation. If “false”, the transformation leaves the existing initializing subgraph for the ReadValue operation.
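A minimal sketch of applying the transformation before loading the network; the model path, the device, and the header location are placeholders:

#include <ie_core.hpp>
#include <ie_transformations.hpp>  // assumed header declaring lowLatency2

int main() {
    InferenceEngine::Core core;
    auto network = core.ReadNetwork("model.xml");  // placeholder model path

    // Insert ReadValue/Assign pairs on TensorIterator/Loop back edges.
    InferenceEngine::lowLatency2(network, /*use_const_initializer=*/true);

    auto executableNetwork = core.LoadNetwork(network, "CPU");
    return 0;
}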

const Version * GetInferenceEngineVersion()

Gets the current Inference Engine version.

Returns:

The current Inference Engine version