//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/BackendOptions.hpp>
#include <armnn/Deprecated.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/ILayerVisitor.hpp>
#include <armnn/IStrategy.hpp>
#include <armnn/NetworkFwd.hpp>
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Logging.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <functional>
#include <memory>
#include <sstream>
#include <vector>

namespace armnn
{
/// @brief An input connection slot for a layer.
/// The input slot can be connected to an output slot of the preceding layer in the graph.
/// Only one connection to the input slot is allowed.
class IInputSlot
{
public:
    virtual const IOutputSlot* GetConnection() const = 0;
    virtual IOutputSlot* GetConnection() = 0;
    virtual const IConnectableLayer& GetOwningIConnectableLayer() const = 0;

protected:
    /// Not user deletable.
    ~IInputSlot() {}
};

/// @brief An output connection slot for a layer.
/// The output slot may be connected to 1 or more input slots of subsequent layers in the graph.
class IOutputSlot
{
public:
    virtual unsigned int GetNumConnections() const = 0;
    virtual const IInputSlot* GetConnection(unsigned int index) const = 0;
    virtual IInputSlot* GetConnection(unsigned int index) = 0;

    virtual void SetTensorInfo(const TensorInfo& tensorInfo) = 0;
    virtual const TensorInfo& GetTensorInfo() const = 0;
    virtual bool IsTensorInfoSet() const = 0;

    virtual int Connect(IInputSlot& destination) = 0;
    virtual void Disconnect(IInputSlot& slot) = 0;

    virtual unsigned int CalculateIndexOnOwner() const = 0;

    virtual LayerGuid GetOwningLayerGuid() const = 0;

    virtual const IConnectableLayer& GetOwningIConnectableLayer() const = 0;

protected:
    /// Not user deletable.
    ~IOutputSlot() {}
};
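
// A minimal sketch of how the slot interfaces above are typically used when wiring up a graph.
// The names `previousLayer`, `nextLayer` and `outputInfo` are illustrative only: the first two are
// IConnectableLayer pointers returned by INetwork::Add*Layer calls, and the last is a TensorInfo
// describing the tensor produced by `previousLayer`.
//
//     previousLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
//     previousLayer->GetOutputSlot(0).Connect(nextLayer->GetInputSlot(0));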

/// @brief Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
class IConnectableLayer
{
public:
    /// Returns the name of the layer
    virtual const char* GetName() const = 0;

    /// Returns the number of connectable input slots
    virtual unsigned int GetNumInputSlots() const = 0;

    /// Returns the number of connectable output slots
    virtual unsigned int GetNumOutputSlots() const = 0;

    /// Get a const input slot handle by slot index
    virtual const IInputSlot& GetInputSlot(unsigned int index) const = 0;

    /// Get the input slot handle by slot index
    virtual IInputSlot& GetInputSlot(unsigned int index) = 0;

    /// Get the const output slot handle by slot index
    virtual const IOutputSlot& GetOutputSlot(unsigned int index) const = 0;

    /// Get the output slot handle by slot index
    virtual IOutputSlot& GetOutputSlot(unsigned int index) = 0;

    /// Infer the shape of the output(s) based on the provided input shape(s)
    virtual std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const = 0;

    /// Returns the unique id of the layer
    virtual LayerGuid GetGuid() const = 0;

    // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
    // the deprecated ILayerVisitor which is used in the function.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    /// Apply a visitor to this layer
    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
                                      "Accept function is deprecated. Use IStrategy in combination with "
                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
                                      "visitor pattern.",
                                      "22.05")
    virtual void Accept(ILayerVisitor& visitor) const = 0;
    ARMNN_NO_DEPRECATE_WARN_END

    /// Apply a strategy to this layer
    virtual void ExecuteStrategy(IStrategy& strategy) const = 0;

    /// Provide a hint for the optimizer as to which backend to prefer for this layer
    virtual void BackendSelectionHint(Optional<BackendId> backend) = 0;

    /// Returns the armnn::LayerType of this layer
    virtual LayerType GetType() const = 0;

    /// If the layer has a descriptor return it.
    /// The base descriptor can then be cast to the correct descriptor class.
    /// If the layer has no associated descriptor a struct of type NullDescriptor will be returned.
    /// Note: NullDescriptors can be detected because they return true when
    /// the BaseDescriptor IsNull function is invoked.
    virtual const BaseDescriptor& GetParameters() const = 0;

    using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;

    // Returns the ConstantTensors of this Layer if it has any, otherwise returns an empty vector.
    virtual ConstantTensors GetConstantTensorsByRef() = 0;

protected:
    /// Objects are not deletable via the handle
    ~IConnectableLayer() {}
};
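
// For example (illustrative only), a caller holding an IConnectableLayer reference can use
// GetType() and GetParameters() to recover the concrete descriptor of a layer:
//
//     if (layer.GetType() == LayerType::Convolution2d)
//     {
//         const auto& convDesc =
//             static_cast<const Convolution2dDescriptor&>(layer.GetParameters());
//         // convDesc.m_StrideX, convDesc.m_PadLeft, ... can now be inspected.
//     }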

/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
/// a set of features that allows the user to customize this optimization on a per model basis.
struct OptimizerOptions
{
    OptimizerOptions()
        : m_ReduceFp32ToFp16(false)
        , m_Debug(false)
        , m_ReduceFp32ToBf16(false)
        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
        , m_ImportEnabled(false)
        , m_ModelOptions()
        , m_ProfilingEnabled(false)
    {}

    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
                     ModelOptions modelOptions = {})
        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
        , m_Debug(debug)
        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
        , m_ImportEnabled(importEnabled)
        , m_ModelOptions(modelOptions)
        , m_ProfilingEnabled(false)
    {
        if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
        {
            throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
        }
    }

    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                     ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                     bool importEnabled = false, ModelOptions modelOptions = {})
        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
        , m_Debug(debug)
        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
        , m_shapeInferenceMethod(shapeInferenceMethod)
        , m_ImportEnabled(importEnabled)
        , m_ModelOptions(modelOptions)
        , m_ProfilingEnabled(false)
    {
        if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
        {
            throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
        }
    }

    const std::string ToString() const
    {
        std::stringstream stream;
        stream << "OptimizerOptions: \n";
        stream << "\tReduceFp32ToFp16: " << m_ReduceFp32ToFp16 << "\n";
        stream << "\tReduceFp32ToBf16: " << m_ReduceFp32ToBf16 << "\n";
        stream << "\tDebug: " << m_Debug << "\n";
        stream << "\tShapeInferenceMethod: " <<
               (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate")
               << "\n";
        stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
        stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";

        stream << "\tModelOptions: \n";
        for (auto optionsGroup : m_ModelOptions)
        {
            for (size_t i = 0; i < optionsGroup.GetOptionCount(); i++)
            {
                const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
                stream << "\t\tBackend: " << optionsGroup.GetBackendId() << "\n"
                       << "\t\t\tOption: " << option.GetName() << "\n"
                       << "\t\t\tValue: " << std::string(option.GetValue().ToString()) << "\n";
            }
        }

        return stream.str();
    }

    /// Reduces all Fp32 operators in the model to Fp16 for faster processing.
    /// @note This feature works best if all operators of the model are in Fp32. Arm NN will add conversion layers
    ///       around any operator that was not in Fp32 to begin with, or that is not supported in Fp16.
    ///       The overhead of these conversions can lead to slower overall performance if too many conversions are
    ///       required.
    bool m_ReduceFp32ToFp16;

    // Add debug data for easier troubleshooting
    bool m_Debug;

    /// Reduces all Fp32 operators in the model to Bf16 for faster processing.
    /// @note This feature works best if all operators of the model are in Fp32. Arm NN will add conversion layers
    ///       around any operator that was not in Fp32 to begin with, or that is not supported in Bf16.
    ///       The overhead of these conversions can lead to slower overall performance if too many conversions are
    ///       required.
    bool m_ReduceFp32ToBf16;

    // Infer output size when not available
    ShapeInferenceMethod m_shapeInferenceMethod;

    // Enable memory import
    bool m_ImportEnabled;

    // Backend-specific model options
    ModelOptions m_ModelOptions;

    // Enable profiling dump of the optimizer phase
    bool m_ProfilingEnabled;
};
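
// A minimal usage sketch (illustrative only): enable the Fp32 -> Fp16 reduction, leave debug data
// off, then switch shape inference to InferAndValidate before handing the options to
// armnn::Optimize().
//
//     OptimizerOptions optimizerOptions(/*reduceFp32ToFp16=*/true, /*debug=*/false);
//     optimizerOptions.m_shapeInferenceMethod = ShapeInferenceMethod::InferAndValidate;
//     ARMNN_LOG(info) << optimizerOptions.ToString();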

class IWorkloadFactory;
class NetworkImpl;
using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>;

using CompiledBlobDeleter = std::function<void(const void*)>;
using CompiledBlobPtr = std::unique_ptr<void, CompiledBlobDeleter>;

/// Main network class which provides the interface for building up a neural network.
/// This object is subsequently required by the IRuntime::LoadNetwork() method.
class INetwork
{
public:
    static INetwork* CreateRaw(NetworkOptions networkOptions = {});
    static INetworkPtr Create(NetworkOptions networkOptions = {});
    static void Destroy(INetwork* network);

    Status PrintGraph();

    /// Adds an input layer to the network.
    /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified
    ///             when passing the inputs to the IRuntime::EnqueueWorkload() function.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);

    /// Adds an ArgMinMax layer to the network.
    /// @param desc - Parameters for the ArgMinMax operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
                                         const char* name = nullptr);

    /// Adds a cast layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddCastLayer(const char* name = nullptr);

    /// Add a Comparison layer to the network.
    /// @param comparisonDescriptor - Descriptor for the comparison operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                          const char* name = nullptr);

    /// Adds a concatenation layer to the network.
    /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
    ///                           process. Number of Views must be equal to the number of inputs, and their order
    ///                           must match - e.g. first view corresponds to the first input, second view to the
    ///                           second input, etc....
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                      const char* name = nullptr);
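
    // One way to build the ConcatDescriptor is via the CreateDescriptorForConcatenation helper
    // declared in armnn/Descriptors.hpp. A sketch, assuming two input shapes concatenated along
    // axis 1 (the shapes, axis and `network` pointer are illustrative):
    //
    //     std::vector<TensorShape> shapes = { TensorShape({1, 2, 3, 4}), TensorShape({1, 5, 3, 4}) };
    //     ConcatDescriptor concatDesc = CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
    //     IConnectableLayer* concat = network->AddConcatLayer(concatDesc, "concat");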

    /// Adds a 2D convolution layer to the network.
    /// @param convolution2dDescriptor - Description of the 2D convolution layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                             const char* name = nullptr);
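
    // With this overload the weights (and optional bias) are typically supplied as additional
    // inputs, for example from ConstantLayers, mirroring the FullyConnected example further down.
    // A sketch only, with illustrative names, where `weights` is a ConstTensor whose TensorInfo
    // has the IsConstant flag set:
    //
    //     IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "conv weights");
    //     IConnectableLayer* convLayer    = network->AddConvolution2dLayer(convDesc, "conv");
    //     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
    //     weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));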

    /// Adds a 2D convolution layer to the network.
    /// @param convolution2dDescriptor - Description of the 2D convolution layer.
    /// @param weights - Tensor for the weights data.
    /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                             const ConstTensor& weights,
                                             const Optional<ConstTensor>& biases,
                                             const char* name = nullptr);

    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                             const ConstTensor& weights,
                                             const char* name = nullptr);

    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This AddConvolution2dLayer overload is deprecated", "22.08")
    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                             const ConstTensor& weights,
                                             const ConstTensor& biases,
                                             const char* name = nullptr);

    /// Adds a 3D convolution layer to the network.
    /// @param convolution3dDescriptor - Description of the 3D convolution layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddConvolution3dLayer(const Convolution3dDescriptor& convolution3dDescriptor,
                                             const char* name = nullptr);

    /// Adds a depth to space layer to the network.
    /// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                            const char* name = nullptr);

    /// Adds a 2D depthwise convolution layer to the network.
    /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
                                                      const char* name = nullptr);

    /// Adds a 2D depthwise convolution layer to the network.
    /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
    /// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
    /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
    IConnectableLayer* AddDepthwiseConvolution2dLayer(
        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
        const ConstTensor& weights,
        const Optional<ConstTensor>& biases,
        const char* name = nullptr);

    /// Adds a Dequantize layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);

    /// Adds a Detection PostProcess layer to the network.
    /// @param descriptor - Description of the Detection PostProcess layer.
    /// @param anchors - Tensor for anchors.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddDetectionPostProcessLayer(
        const DetectionPostProcessDescriptor& descriptor,
        const ConstTensor& anchors,
        const char* name = nullptr);

    /// Add an ElementwiseUnary layer to the network.
    /// @param elementwiseUnaryDescriptor - Descriptor for the elementwise unary operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                const char* name = nullptr);

    /// Add a Fill layer to the network.
    /// @param fillDescriptor - Descriptor for the fill operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
                                    const char* name = nullptr);

    /// Adds a fully connected layer to the network.
    /// @param fullyConnectedDescriptor - Description of the fully connected layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    ///
    /// @note Weights and biases are passed in as inputs. If they are constant tensors you can simply store
    ///       them in a ConstantLayer as seen below. A full example can be found in samples/SimpleSample.cpp.
    ///
    /// @code
    /// // Make sure the IsConstant flag is set on the weightsInfo before passing it to the ConstTensor.
    /// ConstTensor weights(weightsInfo, weightsData);
    ///
    /// // Constant layer that now holds weights data for FullyConnected
    /// IConnectableLayer* const constantWeightsLayer = myNetwork->AddConstantLayer(weights, "weights");
    ///
    /// FullyConnectedDescriptor fullyConnectedDesc;
    /// IConnectableLayer* const fullyConnectedLayer = myNetwork->AddFullyConnectedLayer(fullyConnectedDesc,
    ///                                                                                  "fully connected");
    /// IConnectableLayer* InputLayer = myNetwork->AddInputLayer(0);
    /// InputLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
    /// constantWeightsLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(1));
    /// @endcode
    IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                              const char* name = nullptr);

    /// Adds a permute layer to the network.
    /// @param permuteDescriptor - PermuteDescriptor to configure the permute.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                       const char* name = nullptr);

    /// Adds a batch to space ND layer to the network.
    /// @param batchToSpaceNdDescriptor - Description of the layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                              const char* name = nullptr);

    /// Adds a 2D pooling layer to the network.
    /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
                                         const char* name = nullptr);

    /// Adds a 3D pooling layer to the network.
    /// @param pooling3dDescriptor - Pooling3dDescriptor to configure the pooling.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
                                         const char* name = nullptr);

    /// Adds a Precompiled layer to the network.
    /// This method is intended for use by backends.
    /// @param preCompiledDescriptor - PreCompiledDescriptor containing the parameters for the Precompiled layer.
    /// @param compiledBlobPtr - CompiledBlobPtr to the pre-compiled object set for the Precompiled layer.
    /// @param backend - Optional BackendId set for the Precompiled layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
                                           CompiledBlobPtr compiledBlobPtr,
                                           const Optional<BackendId>& backend,
                                           const char* name = nullptr);

    /// Adds an activation layer to the network.
    /// @param activationDescriptor - ActivationDescriptor to configure the activation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
                                          const char* name = nullptr);

    /// Adds a normalization layer to the network.
    /// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
                                             const char* name = nullptr);

    /// Adds a slice layer to the network.
    /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);

    /// Adds a softmax layer to the network.
    /// If the data type is QAsymm8, then the output quantization parameters
    /// must have a scale of 1/256 and an offset of 0.
    /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
                                       const char* name = nullptr);

    /// Adds a splitter layer to the network.
    /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
    ///                             Number of Views must be equal to the number of outputs,
    ///                             and their order must match - e.g. first view corresponds to
    ///                             the first output, second view to the second output, etc....
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
                                        const char* name = nullptr);

    /// Adds a merge layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddMergeLayer(const char* name = nullptr);

    /// Adds an addition layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddAdditionLayer(const char* name = nullptr);

    /// Adds a multiplication layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);

    /// Adds a batch normalization layer to the network.
    /// @param desc - Parameters for the batch normalization operation.
    /// @param mean - Pre-calculated mean for each channel.
    /// @param variance - Pre-calculated variance for each channel.
    /// @param beta - Per-channel additive factor.
    /// @param gamma - Per-channel multiplicative factor.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                  const ConstTensor& mean,
                                                  const ConstTensor& variance,
                                                  const ConstTensor& beta,
                                                  const ConstTensor& gamma,
                                                  const char* name = nullptr);

    /// Adds a rank layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddRankLayer(const char* name = nullptr);

    /// Adds a resize layer to the network.
    /// @param resizeDescriptor - Parameters for the resize operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
                                      const char* name = nullptr);

    /// Adds a reduce layer to the network.
    /// @param reduceDescriptor - Parameters for the reduce operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
                                      const char* name = nullptr);

    /// Adds an instance normalization layer to the network.
    /// @param desc - Parameters for the instance normalization operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
                                                     const char* name = nullptr);

    /// Adds an L2 normalization layer to the network.
    /// Normalization is performed along dimension 1, but requires a 4d input.
    /// @param desc - Parameters for the L2 normalization operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                               const char* name = nullptr);

    /// Adds a log softmax layer to the network.
    /// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
                                          const char* name = nullptr);

    /// Adds a layer with no inputs and a single output, which always corresponds to
    /// the passed in constant tensor.
    /// @param input - Tensor to be provided as the only output of the layer. The layer will maintain
    ///                its own copy of the tensor data, meaning the memory referenced by @a input can
    ///                be freed or reused after this function is called.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddConstantLayer(const ConstTensor& input,
                                        const char* name = nullptr);

    /// Adds a reshape layer to the network.
    /// @param reshapeDescriptor - Parameters for the reshape operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                       const char* name = nullptr);

    /// Adds a shape layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddShapeLayer(const char* name = nullptr);

    /// Adds a space to batch layer to the network.
    /// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                              const char* name = nullptr);

    /// Adds a space to depth layer to the network.
    /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                            const char* name = nullptr);

    /// Adds a floor layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddFloorLayer(const char* name = nullptr);

    /// Adds an output layer to the network.
    /// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
    ///             when passing the outputs to the IRuntime::EnqueueWorkload() function.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr);

    /// Add a Lstm layer to the network.
    /// @param descriptor - Parameters for the Lstm operation.
    /// @param params - Weights and biases for the LSTM cell.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
                                    const LstmInputParams& params,
                                    const char* name = nullptr);

    /// Adds a division layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddDivisionLayer(const char* name = nullptr);

    /// Adds a subtraction layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);

    /// Add a Maximum layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddMaximumLayer(const char* name = nullptr);

    /// Add a Mean layer to the network.
    /// @param meanDescriptor - Parameters for the mean operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);

    /// Adds a pad layer to the network.
    /// @param padDescriptor - Padding configuration for the layer. The paddings are given as an n by 2 tensor,
    ///                        where n is the rank of the input tensor, such that paddings[i,0] indicates the
    ///                        amount of padding to add in front of dimension i, and paddings[i,1] indicates the
    ///                        amount of padding to add after the end of dimension i.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
                                   const char* name = nullptr);

    /// Add a quantize layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);

    /// Adds a strided slice layer to the network.
    /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                            const char* name = nullptr);

    /// Add a Minimum layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddMinimumLayer(const char* name = nullptr);

    /// Add Gather layer to the network.
    /// @param descriptor - Description of the gather layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddGatherLayer(const GatherDescriptor& descriptor,
                                      const char* name = nullptr);

    /// Add GatherNd layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddGatherNdLayer(const char* name = nullptr);

    /// Adds a switch layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddSwitchLayer(const char* name = nullptr);

    /// Adds a PReLU layer to the network.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddPreluLayer(const char* name = nullptr);

    /// Adds a 2D transpose convolution layer to the network.
    /// @param descriptor - Description of the 2D transpose convolution layer.
    /// @param weights - Tensor for the weights data.
    /// @param biases - Optional tensor for the bias data.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                      const ConstTensor& weights,
                                                      const Optional<ConstTensor>& biases,
                                                      const char* name = nullptr);

    /// Adds a transpose layer to the network.
    /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                         const char* name = nullptr);

    /// Adds a stack layer to the network.
    /// @param descriptor - Description of the stack layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
                                     const char* name = nullptr);

    /// Add a stand-in layer for a type unknown to the Arm NN framework.
    /// Note: Due to the nature of this layer, no validation can be performed by the framework.
    /// Furthermore, any model containing this layer cannot make use of dynamic tensors since the
    /// tensor sizes cannot be inferred.
    /// @param descriptor - Descriptor for the StandIn layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
                                       const char* name = nullptr);

    /// Add a QuantizedLstm layer to the network.
    /// @param params - The weights and biases for the Quantized LSTM cell.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                             const char* name = nullptr);

    /// Add a QLstm layer to the network.
    /// @param descriptor - Parameters for the QLstm operation.
    /// @param params - Weights and biases for the layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
                                     const LstmInputParams& params,
                                     const char* name = nullptr);

    /// Adds a Logical Binary layer to the network.
    /// @param descriptor - Description of the Logical Binary layer.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
                                             const char* name = nullptr);

    /// Add a UnidirectionalSequenceLstm layer to the network.
    /// @param descriptor - Parameters for the UnidirectionalSequenceLstm operation.
    /// @param params - Weights and biases for the UnidirectionalSequenceLstm.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddUnidirectionalSequenceLstmLayer(const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                          const LstmInputParams& params,
                                                          const char* name = nullptr);

    /// Add a ChannelShuffle layer to the network.
    /// @param descriptor - Parameters for the ChannelShuffle operation.
    /// @param name - Optional name for the layer.
    /// @return - Interface for configuring the layer.
    IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& descriptor,
                                              const char* name = nullptr);

    // The Accept function needs to be wrapped in a no warn macro to avoid deprecation warnings from
    // the deprecated ILayerVisitor which is used in the function.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    /// Apply a visitor to this network's layers.
    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Accept is deprecated. The ILayerVisitor that works in conjunction with this "
                                      "Accept function is deprecated. Use IStrategy in combination with "
                                      "ExecuteStrategy instead, which is an ABI/API stable version of the "
                                      "visitor pattern.",
                                      "22.05")
    void Accept(ILayerVisitor& visitor) const;
    ARMNN_NO_DEPRECATE_WARN_END

    void ExecuteStrategy(IStrategy& strategy) const;

protected:
    ~INetwork();

    friend void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy);
    friend class TestConnectionPreservation;
    friend TensorInfo GetInputTensorInfo(const INetwork* network);
    friend IOptimizedNetworkPtr Optimize(const INetwork& network,
                                         const std::vector<BackendId>& backendPreferences,
                                         const IDeviceSpec& deviceSpec,
                                         const OptimizerOptions& options,
                                         Optional<std::vector<std::string>&> messages);

    INetwork(NetworkOptions networkOptions = {});

    std::unique_ptr<NetworkImpl> pNetworkImpl;
};
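
// A minimal end-to-end sketch of building a small network with the interface above. All names are
// illustrative, and `inputInfo` is assumed to be a TensorInfo describing the input tensor:
//
//     INetworkPtr network = INetwork::Create();
//
//     IConnectableLayer* input      = network->AddInputLayer(0, "input");
//     IConnectableLayer* activation = network->AddActivationLayer(ActivationDescriptor(), "activation");
//     IConnectableLayer* output     = network->AddOutputLayer(0, "output");
//
//     input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
//     activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
//
//     input->GetOutputSlot(0).SetTensorInfo(inputInfo);
//     activation->GetOutputSlot(0).SetTensorInfo(inputInfo);
//
// The resulting INetworkPtr is then passed to armnn::Optimize() (declared at the end of this header)
// before being loaded into an IRuntime.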

namespace experimental
{
class AsyncNetworkImpl;
class WorkingMemHandle;
}

struct BackendSettings;
struct OptimizationResult;
class OptimizedNetworkImpl;
class IProfiler;
class IOptimizedNetwork
{
public:
    static void Destroy(IOptimizedNetwork* network);

    Status PrintGraph();
    Status SerializeToDot(std::ostream& stream) const;

    arm::pipe::ProfilingGuid GetGuid() const;

    size_t GetNumInputs() const;
    size_t GetNumOutputs() const;

    // Creates a copy of the IOptimizedNetwork. The IOptimizedNetwork will not be reoptimized;
    // the provided ModelOptions will only be used when creating a LoadedNetwork.
    IOptimizedNetwork(const IOptimizedNetwork& other, const ModelOptions& modelOptions);
    IOptimizedNetwork(std::unique_ptr<Graph> graph);
    IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl);
    ~IOptimizedNetwork();

    const std::shared_ptr<IProfiler>& GetProfiler() const;

protected:
    friend class LoadedNetwork;

    friend class experimental::AsyncNetworkImpl;
    friend class experimental::WorkingMemHandle;

    friend Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
    friend ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
    friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                                         const std::vector<BackendId>& backendPreferences,
                                         const IDeviceSpec& deviceSpec,
                                         const OptimizerOptions& options,
                                         Optional<std::vector<std::string>&> messages);
    friend IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                                         const std::vector<BackendId>& backendPreferences,
                                         const IDeviceSpec& deviceSpec,
                                         const OptimizerOptions& options,
                                         Optional<std::vector<std::string>&> messages);

    IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);

    std::unique_ptr<OptimizedNetworkImpl> pOptimizedNetworkImpl;
};
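
// For example (illustrative only), once an IOptimizedNetworkPtr has been obtained from
// armnn::Optimize() below, the optimized graph can be dumped to a Graphviz dot file for
// inspection:
//
//     std::ofstream dotFile("optimized_network.dot");
//     optimizedNetwork->SerializeToDot(dotFile);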

/// Create an optimized version of the network
/// @param network INetwork description of the network to be optimized.
/// @param backendPreferences The choice of backends, in order of user preference.
/// @param deviceSpec DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec().
/// @param options OptimizerOptions object with optimizer configuration options.
/// @param messages If there are failures or warnings, a string describing each of them will be added to the vector.
/// @return An IOptimizedNetworkPtr interface to the optimized network; throws an exception derived from
///         armnn::Exception if the process fails.

IOptimizedNetworkPtr Optimize(const INetwork& network,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options = OptimizerOptions(),
                              Optional<std::vector<std::string>&> messages = EmptyOptional());
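
// A typical invocation might look as follows (a sketch only; `network` is an INetworkPtr built with
// the INetwork interface above and `runtime` is an armnn::IRuntimePtr):
//
//     std::vector<BackendId> backendPreferences = { Compute::CpuAcc, Compute::CpuRef };
//     IOptimizedNetworkPtr optimizedNetwork = Optimize(*network,
//                                                      backendPreferences,
//                                                      runtime->GetDeviceSpec(),
//                                                      OptimizerOptions());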

/// Create an optimized version of the network
/// @param inGraph Graph to be optimized.
/// @param backendPreferences The choice of backends, in order of user preference.
/// @param deviceSpec DeviceSpec object as queried from the runtime. See IRuntime::GetDeviceSpec().
/// @param options OptimizerOptions object with optimizer configuration options.
/// @param messages If there are failures or warnings, a string describing each of them will be added to the vector.
/// @return An IOptimizedNetworkPtr interface to the optimized network; throws an exception derived from
///         armnn::Exception if the process fails.

IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                              const std::vector<BackendId>& backendPreferences,
                              const IDeviceSpec& deviceSpec,
                              const OptimizerOptions& options,
                              Optional<std::vector<std::string>&> messages = EmptyOptional());
} //namespace armnn