// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief This is a header file for the Inference Engine Core class C++ API
 *
 * @file ie_core.hpp
 */
#pragma once

#include <istream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "ie_version.hpp"
#include "ie_extension.h"
#include "ie_plugin_config.hpp"
#include "ie_remote_context.hpp"

namespace InferenceEngine {

/**
 * @brief This class represents the Inference Engine Core entity.
 *
 * It can throw exceptions safely to the application, where they can be properly handled.
 */
class INFERENCE_ENGINE_API_CLASS(Core) {
    class Impl;
    std::shared_ptr<Impl> _impl;

public:
    /** @brief Constructs an Inference Engine Core instance using an XML configuration file with
     * plugins description.
     *
     * See RegisterPlugins for more details.
     *
     * @param xmlConfigFile A path to the .xml file with plugins to load. If the XML configuration file is not
     * specified, then the default Inference Engine plugins are loaded from the default plugin.xml file.
     */
    explicit Core(const std::string& xmlConfigFile = {});
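
    /* A minimal construction sketch ("custom_plugins.xml" is a hypothetical path; see
     * RegisterPlugins below for the expected file format):
     * ```cpp
     * InferenceEngine::Core core;                          // uses the default plugin.xml
     * InferenceEngine::Core custom("custom_plugins.xml");  // uses a custom plugin list
     * ```
     */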

    /**
     * @brief Returns plugins version information
     *
     * @param deviceName Device name to identify plugin
     * @return A map of plugin names to versions
     */
    std::map<std::string, Version> GetVersions(const std::string& deviceName) const;

#ifdef ENABLE_UNICODE_PATH_SUPPORT
    /**
     * @brief Reads models from IR and ONNX formats
     * @param modelPath path to model
     * @param binPath path to data file
     * For IR format (*.bin):
     *  * if the path is empty, will try to read a bin file with the same name as the xml one;
     *  * if the bin file with the same name is not found, will load IR without weights.
     * For ONNX format (*.onnx or *.prototxt):
     *  * the binPath parameter is not used.
     * @return CNNNetwork
     */
    CNNNetwork ReadNetwork(const std::wstring& modelPath, const std::wstring& binPath = {}) const;
#endif

    /**
     * @brief Reads models from IR and ONNX formats
     * @param modelPath path to model
     * @param binPath path to data file
     * For IR format (*.bin):
     *  * if the path is empty, will try to read a bin file with the same name as the xml one;
     *  * if the bin file with the same name is not found, will load IR without weights.
     * For ONNX format (*.onnx or *.prototxt):
     *  * the binPath parameter is not used.
     * @return CNNNetwork
     */
    CNNNetwork ReadNetwork(const std::string& modelPath, const std::string& binPath = {}) const;
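
    /* A minimal reading sketch ("model.xml" is a hypothetical IR path; the matching
     * "model.bin" is picked up automatically when binPath is left empty):
     * ```cpp
     * InferenceEngine::Core core;
     * InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");
     * ```
     */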
    /**
     * @brief Reads models from IR and ONNX formats
     * @param model string with model in IR or ONNX format
     * @param weights shared pointer to constant blob with weights
     * Reading ONNX models doesn't support loading weights from data blobs.
     * If you are using an ONNX model with external data files, please use the
     * `InferenceEngine::Core::ReadNetwork(const std::string& modelPath, const std::string& binPath) const`
     * function overload, which takes a filesystem path to the model.
     * For the ONNX case the second parameter should contain an empty blob.
     * @note The created InferenceEngine::CNNNetwork object shares the weights with the `weights` object.
     * So do not create `weights` on top of temporary data that can be freed later, since the network's
     * constant data would then point to invalid memory.
     * @return CNNNetwork
     */
    CNNNetwork ReadNetwork(const std::string& model, const Blob::CPtr& weights) const;
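
    /* A minimal in-memory reading sketch for the ONNX case, where the weights blob is left
     * empty ("model.onnx" is a hypothetical path; requires <fstream> and <sstream>):
     * ```cpp
     * std::ifstream file("model.onnx", std::ios::binary);
     * std::stringstream buffer;
     * buffer << file.rdbuf();
     * InferenceEngine::Core core;
     * InferenceEngine::CNNNetwork network = core.ReadNetwork(buffer.str(), InferenceEngine::Blob::CPtr());
     * ```
     */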

    /**
     * @brief Creates an executable network from a network object.
     *
     * Users can create as many networks as they need and use
     * them simultaneously (up to the limitation of the hardware resources)
     *
     * @param network CNNNetwork object acquired from Core::ReadNetwork
     * @param deviceName Name of device to load network to
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork LoadNetwork(
        const CNNNetwork& network, const std::string& deviceName,
        const std::map<std::string, std::string>& config = {});
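
    /* A minimal read-then-load sketch (assumes a "CPU" plugin is available; "model.xml"
     * is a hypothetical path; error handling omitted):
     * ```cpp
     * InferenceEngine::Core core;
     * InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");
     * InferenceEngine::ExecutableNetwork exeNetwork = core.LoadNetwork(network, "CPU");
     * InferenceEngine::InferRequest request = exeNetwork.CreateInferRequest();
     * ```
     */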

    /**
     * @brief Reads a model and creates an executable network from the IR or ONNX file
     *
     * This can be more efficient than using the ReadNetwork + LoadNetwork(CNNNetwork) flow,
     * especially for cases when caching is enabled and a cached model is available
     *
     * @param modelPath path to model
     * @param deviceName Name of device to load network to
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     *
     * @return An executable network reference
     */
    ExecutableNetwork LoadNetwork(
        const std::string& modelPath, const std::string& deviceName,
        const std::map<std::string, std::string>& config = {});
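
    /* A minimal single-call loading sketch. Enabling the model cache via the "CACHE_DIR"
     * config key is an assumption here (the key is declared in ie_plugin_config.hpp in
     * recent releases); "model.xml" and "cache_dir" are hypothetical paths:
     * ```cpp
     * InferenceEngine::Core core;
     * core.SetConfig({{"CACHE_DIR", "cache_dir"}});  // assumed cache support
     * InferenceEngine::ExecutableNetwork exeNetwork = core.LoadNetwork("model.xml", "CPU");
     * ```
     */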

    /**
     * @brief Registers extension
     * @param extension Pointer to already loaded extension
     */
    void AddExtension(const IExtensionPtr& extension);

    /**
     * @brief Creates an executable network from a network object within a specified remote context.
     * @param network CNNNetwork object acquired from Core::ReadNetwork
     * @param context Pointer to RemoteContext object
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network object
     */
    ExecutableNetwork LoadNetwork(
        const CNNNetwork& network, RemoteContext::Ptr context,
        const std::map<std::string, std::string>& config = {});

    /**
     * @brief Registers extension for the specified plugin
     *
     * @param extension Pointer to already loaded extension
     * @param deviceName Device name to identify the plugin to add the extension to
     */
    void AddExtension(IExtensionPtr extension, const std::string& deviceName);

    /**
     * @brief Creates an executable network from a previously exported network
     *
     * @param modelFileName Path to the location of the exported file
     * @param deviceName Name of device to load the executable network on
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork ImportNetwork(
        const std::string& modelFileName, const std::string& deviceName,
        const std::map<std::string, std::string>& config = {});
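
    /* A minimal export/import round-trip sketch (assumes ExecutableNetwork::Export is
     * supported by the target plugin; "model.xml" and "exported.blob" are hypothetical paths):
     * ```cpp
     * InferenceEngine::Core core;
     * InferenceEngine::ExecutableNetwork exeNetwork = core.LoadNetwork("model.xml", "CPU");
     * exeNetwork.Export("exported.blob");                  // assumed plugin export support
     * InferenceEngine::ExecutableNetwork imported = core.ImportNetwork("exported.blob", "CPU");
     * ```
     */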

    /**
     * @brief Creates an executable network from a previously exported network
     * @param networkModel network model stream
     * @param deviceName Name of device to load the executable network on
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork ImportNetwork(std::istream& networkModel, const std::string& deviceName,
                                    const std::map<std::string, std::string>& config = {});
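
    /* A minimal stream-based import sketch ("exported.blob" is a hypothetical path produced
     * by a prior export; requires <fstream>):
     * ```cpp
     * std::ifstream blobFile("exported.blob", std::ios::binary);
     * InferenceEngine::Core core;
     * InferenceEngine::ExecutableNetwork imported = core.ImportNetwork(blobFile, "MYRIAD");
     * ```
     */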

    /**
     * @deprecated Use Core::ImportNetwork with explicit device name
     * @brief Creates an executable network from a previously exported network
     * @param networkModel network model stream
     * @return An executable network reference
     */
    INFERENCE_ENGINE_DEPRECATED("Use Core::ImportNetwork with explicit device name")
    ExecutableNetwork ImportNetwork(std::istream& networkModel);

    /**
     * @brief Creates an executable network from a previously exported network within a specified
     * remote context.
     *
     * @param networkModel Network model stream
     * @param context Pointer to RemoteContext object
     * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load
     * operation
     * @return An executable network reference
     */
    ExecutableNetwork ImportNetwork(std::istream& networkModel,
                                    const RemoteContext::Ptr& context,
                                    const std::map<std::string, std::string>& config = {});

    /**
     * @brief Queries a device whether it supports the specified network with the specified configuration
     *
     * @param deviceName A name of a device to query
     * @param network Network object to query
     * @param config Optional map of pairs: (config parameter name, config parameter value)
     * @return An object containing a map of pairs: a layer name -> a device name supporting this layer.
     */
    QueryNetworkResult QueryNetwork(
        const CNNNetwork& network, const std::string& deviceName,
        const std::map<std::string, std::string>& config = {}) const;
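
    /* A minimal query sketch (assumes the QueryNetworkResult::supportedLayersMap member
     * declared in ie_common.h; "model.xml" is a hypothetical path; requires <iostream>):
     * ```cpp
     * InferenceEngine::Core core;
     * InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");
     * InferenceEngine::QueryNetworkResult result = core.QueryNetwork(network, "CPU");
     * for (const auto& layer : result.supportedLayersMap) {
     *     std::cout << layer.first << " -> " << layer.second << std::endl;
     * }
     * ```
     */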

    /**
     * @brief Sets configuration for a device; acceptable keys can be found in ie_plugin_config.hpp
     *
     * @param deviceName An optional name of a device. If the device name is not specified, the config is set for all
     * the registered devices.
     *
     * @param config Map of pairs: (config parameter name, config parameter value)
     */
    void SetConfig(const std::map<std::string, std::string>& config, const std::string& deviceName = {});
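
    /* A minimal configuration sketch using the PERF_COUNT key from ie_plugin_config.hpp
     * (the CONFIG_KEY/CONFIG_VALUE macros expand to plain strings):
     * ```cpp
     * InferenceEngine::Core core;
     * core.SetConfig({{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}}, "CPU");
     * ```
     */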

    /**
     * @brief Gets configuration dedicated to device behaviour.
     *
     * The method is intended to extract information that can be set via the SetConfig method.
     *
     * @param deviceName - A name of a device to get a configuration value for.
     * @param name - config key.
     * @return Value of config corresponding to config key.
     */
    Parameter GetConfig(const std::string& deviceName, const std::string& name) const;

    /**
     * @brief Gets a general runtime metric for dedicated hardware.
     *
     * The method is needed to request common device properties
     * which are executable-network agnostic. It can be the device name, temperature, or other device-specific values.
     *
     * @param deviceName - A name of a device to get a metric value for.
     * @param name - metric name to request.
     * @return Metric value corresponding to the metric key.
     */
    Parameter GetMetric(const std::string& deviceName, const std::string& name) const;
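
    /* A minimal metric query sketch using the FULL_DEVICE_NAME key from ie_plugin_config.hpp
     * (METRIC_KEY(FULL_DEVICE_NAME) expands to the plain string "FULL_DEVICE_NAME"):
     * ```cpp
     * InferenceEngine::Core core;
     * std::string fullName = core.GetMetric("CPU", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
     * ```
     */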

    /**
     * @brief Returns devices available for neural networks inference
     *
     * @return A vector of devices. The devices are returned as { CPU, FPGA.0, FPGA.1, MYRIAD }.
     * If there is more than one device of a specific type, they are enumerated with the .# suffix.
     */
    std::vector<std::string> GetAvailableDevices() const;
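
    /* A minimal enumeration sketch combining GetAvailableDevices with GetVersions above
     * (requires <iostream>):
     * ```cpp
     * InferenceEngine::Core core;
     * for (const std::string& device : core.GetAvailableDevices()) {
     *     std::cout << device << std::endl;
     *     for (const auto& version : core.GetVersions(device)) {
     *         std::cout << "  " << version.first << ": " << version.second.description << std::endl;
     *     }
     * }
     * ```
     */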

    /**
     * @brief Registers a new device and a plugin which implements this device inside Inference Engine.
     *
     * @param pluginName A name of a plugin. Depending on the platform, pluginName is wrapped with a shared library
     * suffix and prefix to identify the full library name
     *
     * @param deviceName A device name to register the plugin for. If the device name is not specified, it is taken
     * from the plugin itself.
     */
    void RegisterPlugin(const std::string& pluginName, const std::string& deviceName);

    /**
     * @brief Unloads a previously loaded plugin with the specified name from Inference Engine.
     * The method is needed to remove the plugin instance and free its resources. If the plugin for the
     * specified device has not been created before, the method throws an exception.
     *
     * @param deviceName Device name identifying the plugin to remove from Inference Engine
     */
    void UnregisterPlugin(const std::string& deviceName);

    /** @brief Registers a plugin to the Inference Engine Core instance using an XML configuration file with
     * plugins description.
     *
     * The XML file has the following structure:
     *
     * ```xml
     * <ie>
     *     <plugins>
     *         <plugin name="" location="">
     *             <extensions>
     *                 <extension location=""/>
     *             </extensions>
     *             <properties>
     *                 <property key="" value=""/>
     *             </properties>
     *         </plugin>
     *     </plugins>
     * </ie>
     * ```
     *
     * - `name` identifies the name of the device enabled by the plugin
     * - `location` specifies the absolute path to the dynamic library with the plugin. The path can also be relative
     * to the Inference Engine shared library, which allows having a common config for different systems with
     * different configurations.
     * - Properties are set to the plugin via the `SetConfig` method.
     * - Extensions are set to the plugin via the `AddExtension` method.
     *
     * @param xmlConfigFile A path to the .xml file with plugins to register.
     */
    void RegisterPlugins(const std::string& xmlConfigFile);

    /**
     * @brief Creates a new shared context object on the specified accelerator device
     * using specified plugin-specific low-level device API parameters (device handle, pointer, etc.)
     * @param deviceName Name of a device to create the new shared context on.
     * @param params Map of device-specific shared context parameters.
     * @return A shared pointer to the created remote context.
     */
    RemoteContext::Ptr CreateContext(const std::string& deviceName, const ParamMap& params);

    /**
     * @brief Gets a pointer to the default (plugin-supplied) shared context object for the specified accelerator
     * device.
     * @param deviceName - A name of a device to get the default shared context from.
     * @return A shared pointer to the default remote context.
     */
    RemoteContext::Ptr GetDefaultContext(const std::string& deviceName);
};
}  // namespace InferenceEngine