blob: 27d072004eef5c76bfe109944d3b48580bb3591c [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
/// Shared state carried through the conversion of one NNAPI model into an
/// armnn::INetwork: the backends to query for layer support, the network
/// being built, per-operand output slots, and the memory pools backing
/// constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    {}

    // Backends to try (in order) when checking layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Output slot produced for each operand — presumably indexed by operand
    // index; confirm against the converter that fills it.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools for CONSTANT_REFERENCE operands; must outlive conversion
    // since GetOperandValueReadOnlyAddress returns pointers into them.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
/// Pairs an (optional) armnn::IOutputSlot with the TensorInfo of the value it
/// carries, representing one resolved input of a layer being constructed.
/// (Member implementations live in the corresponding .cpp — not visible here.)
class LayerInputHandle
{
public:
    // Constructs an invalid handle.
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // Whether this handle wraps a usable output slot.
    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    // TensorInfo describing the tensor carried by the wrapped slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
74
/// Holds an armnn::ConstTensor referencing constant operand data, optionally
/// owning a swizzled (permuted) copy of that data when a layout mapping was
/// required. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data; when non-identity the swizzled copy is owned here.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
109
110} // namespace armnn_driver
111
112///
113/// Utility functions
114///
115
116namespace
117{
118
119using namespace armnn_driver;
120using namespace android::nn;
121
// Convenience function to log the reason for failing to convert a model.
// Logs via ALOGD using printf-style formatting.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
130
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order, querying each backend's ILayerSupport
// object; stops at the first backend that reports support. 'supported' (an
// lvalue bool provided by the caller) is written with the final result.
// Unregistered backends and per-backend rejection reasons are logged via ALOGD.
// An InvalidArgumentException from the support check is rethrown with added
// context. NOTE: the macro body is a try/catch statement, so it must be
// expanded where a statement is valid.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100174
// Builds an armnn::TensorShape from a HAL operand's 'dimensions' vector.
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
180
Matthew Bentham912b3622019-05-03 15:49:14 +0100181inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100182{
Matthew Bentham912b3622019-05-03 15:49:14 +0100183 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
184 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
185 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100186}
187
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
// (1.2 set plus TENSOR_QUANT8_ASYMM_SIGNED)
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif
224
// Overload set: IsBool tells whether an operand is a BOOL scalar (only
// possible from HAL 1.2 on); Is12OrLaterOperand tells whether an operand
// comes from a HAL 1.2-or-later model. Overload resolution on the HAL
// operand type picks the right answer at compile time.

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
264
// Appends a Reshape layer to 'network' that reshapes 'inputLayer' to the
// shape carried by 'reshapeInfo', and returns the new layer (its output
// tensor info already set).
// LayerHandleType must expose Connect(armnn::IInputSlot&).
template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}
282
// Connects input0 and input1 to the two input slots of 'startLayer'. If the
// inputs have different ranks, a Reshape layer is inserted in front of the
// lower-rank input that prepends degenerate (size-1) dimensions so both ranks
// match, enabling ArmNN broadcasting. Input order is preserved.
// @return false if the required Reshape is unsupported by all backends.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // New shape: 1s in the leading positions, original dimensions shifted right.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
375
// Computes explicit head/tail padding for one spatial dimension from the
// Android NN padding scheme (delegates to calculateExplicitPadding).
// @param input   Input size along this dimension.
// @param kernel  Kernel size along this dimension.
// @param stride  Stride along this dimension.
// @param outPadHead/outPadTail  Receive the computed padding.
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // calculateExplicitPadding yields non-negative values, so the casts are safe.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
389
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Overload taking a dilation factor (HAL 1.2+ convolutions).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Padding for transpose convolution is derived from the OUTPUT size; note the
// signed padding outputs (no casts applied here).
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
409
Matthew Bentham912b3622019-05-03 15:49:14 +0100410Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100411{
412 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100413 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100414 shape.dimensions = operand.dimensions;
415 shape.scale = operand.scale;
416 shape.offset = operand.zeroPoint;
417 return shape;
418}
419
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// HAL 1.2 overload of GetOperandShape (same field mapping as the 1.0 version).
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// HAL 1.3 overload of GetOperandShape (same field mapping as the 1.0 version).
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
447
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
// For per-axis quantized weights the per-channel bias scales are recomputed
// outright; otherwise the scalar bias scale is overwritten only when it is
// within 1% of the expected value.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Tolerate up to 1% relative difference before overwriting.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
487
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used e.g. for concat on dim 2).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (mutually inverse rotations of the axes)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100495
// Appends a Transpose layer applying 'mappings' to 'input' and returns it,
// with its output tensor info set to the transposed shape.
// OSlot must expose Connect(armnn::IInputSlot&) and GetTensorInfo().
template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
514
arovir01b0717b52018-09-05 17:03:25 +0100515bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
516 const armnn::TensorShape & outputShape,
517 uint32_t concatDim)
518{
519 // Validate the output shape is correct given the input shapes (which have just been validated)
520 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
521 if (outputShape.GetNumDimensions() != numDimensions)
522 {
523 return Fail("%s: Output shape has wrong number of dimensions", __func__);
524 }
525
526 unsigned int outputSizeAlongConcatenatedDimension = 0;
527 for (unsigned int i = 0; i < inputShapes.size(); i++)
528 {
529 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
530 }
531
532 for (unsigned int i = 0; i < numDimensions; ++i)
533 {
534 if (i == concatDim)
535 {
536 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
537 {
538 return Fail(
539 "%s: Invalid output shape for dimension %d (%d != %d)",
540 __func__,
541 i,
542 outputShape[i],
543 outputSizeAlongConcatenatedDimension);
544 }
545 }
546 else
547 {
548 if (outputShape[i] != inputShapes[0][i])
549 {
550 return Fail("%s: Invalid output shape", __func__);
551 }
552 }
553 }
554
555 return true;
556}
557
558bool RequiresReshape(armnn::TensorShape & inputShape)
559{
560 return inputShape.GetNumDimensions() < 3;
561}
562
// Inserts a Transpose ("swizzle") layer in front of every input when 'mapping'
// is not the 4-D identity, then rewrites 'inputs' and 'inputShapes' in place
// to refer to the transposed outputs. No-op for the identity mapping.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
583
// Checks that a Transpose with 'mapping' is supported by the backends for
// EVERY input, and only then applies SwizzleInputs to all of them (so the
// operation is all-or-nothing). Identity mappings are a supported no-op.
// @return false if any input's transpose is unsupported; inputs unchanged.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
618
619
// Decides whether a concat needs its inputs permuted so ArmN​N can implement it
// with Compute Library subtensors, and if so rewrites 'concatDimension' and
// fills 'permutationPair' with (forward, inverse) permutations.
// @return true if a permutation is required, false if concat can run as-is.
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
644
645} // anonymous namespace
646
647namespace armnn_driver
648{
649
650//// Creates an ArmNN activation layer and connects it to the given layer, if the
651//// passed in AndroidNN activation function requires so.
652//// @return The end layer of the sequence of layers built for the given AndroidNN
653//// activation function or nullptr if an error occurred (e.g. unsupported activation).
654//// Note that the end layer matches the input layer if no activation is required
655//// (the sequence of layers has length 1).
656armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
657 ActivationFn activation,
658 armnn::IConnectableLayer* prevLayer,
659 ConversionData& data);
660
661} // namespace armnn_driver
662
663///
664/// Utility templates
665///
666
667namespace armnn_driver
668{
669
670using namespace android::nn;
671
// Returns a pointer to the operand feeding input 'inputIndex' of 'operation',
// or nullptr if the index is out of range (optionally logging the failure).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match the actual argument types; the size_t returned by
            // size() must not be passed through "%i" (UB on LP64 targets).
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
694
// Returns a pointer to the operand receiving output 'outputIndex' of
// 'operation', or nullptr (with the failure logged) if the index is invalid.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match the actual argument types; the size_t returned by
        // size() must not be passed through "%i" (UB on LP64 targets).
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
714
// Resolves the read-only address of a constant operand's data:
//  - CONSTANT_COPY:      inside model.operandValues
//  - CONSTANT_REFERENCE: inside one of the conversion's memory pools
//  - NO_VALUE:           nullptr, silently, when 'optional' is set
// Any other lifetime (e.g. a runtime input) returns nullptr and logs a failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
761
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100762template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100763 typename HalOperation = typename HalPolicy::Operation,
764 typename HalModel = typename HalPolicy::Model,
765 typename HalOperandType = typename HalPolicy::OperandType>
766bool GetOperandType(const HalOperation& operation,
767 uint32_t inputIndex,
768 const HalModel& model,
769 HalOperandType& type)
770{
771 using HalOperand = typename HalPolicy::Operand;
772
773 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
774 if (!operand)
775 {
776 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
777 }
778
779 type = operand->type;
780 return true;
781}
782
/// Returns true when the operand's value is fixed at model build time.
/// NO_VALUE (an omitted optional operand) is also treated as constant.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
795
/// Wraps a constant HAL operand in a ConstTensorPin suitable for creating an ArmNN ConstTensor.
///
/// @param operand             Operand to convert; must be a tensor type with constant lifetime
///                            (unless @p optional is set).
/// @param model               HAL model owning the operand's data.
/// @param data                Conversion state (memory pools for CONSTANT_REFERENCE data).
/// @param dimensionMappings   Permutation applied to the tensor data (e.g. to convert Android
///                            kernel layout to ArmNN layout); also used to remap the per-axis
///                            quantization dimension.
/// @param overrideTensorShape If non-null, replaces the shape derived from the operand.
/// @param optional            If true, a missing value yields a pin marked "valid but optional"
///                            rather than a failure.
/// @return A valid pin on success; an invalid (default) pin on failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Optional operands may legitimately have a non-constant (NO_VALUE) lifetime.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
844
845template<typename HalPolicy,
846 typename HalOperation = typename HalPolicy::Operation,
847 typename HalModel = typename HalPolicy::Model>
848ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
849 uint32_t inputIndex,
850 const HalModel& model,
851 const ConversionData& data,
852 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
853 const armnn::TensorShape* overrideTensorShape = nullptr,
854 bool optional = false)
855{
856 using HalOperand = typename HalPolicy::Operand;
857
858 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
859 if (!operand)
860 {
861 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
862 return ConstTensorPin();
863 }
864 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
865 model,
866 data,
867 dimensionMappings,
868 overrideTensorShape,
869 optional);
870}
871
872template<typename HalPolicy,
873 typename OutputType,
874 typename HalOperandType = typename HalPolicy::OperandType,
875 typename HalOperation = typename HalPolicy::Operation,
876 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100877bool GetInputScalar(const HalOperation& operation,
878 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100879 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100880 OutputType& outValue,
881 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100882 const ConversionData& data,
883 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100884{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100885 using HalOperand = typename HalPolicy::Operand;
886
887 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100888 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100889 {
890 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
891 }
892
Sadik Armagan813f2302020-05-19 14:10:30 +0100893 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100894 {
895 return Fail("%s: unexpected operand type: %s (should be %s)",
896 __func__, toString(operand->type).c_str(), toString(type).c_str());
897 }
898
Sadik Armagan813f2302020-05-19 14:10:30 +0100899 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100900 {
901 return Fail("%s: incorrect operand location length: %i (should be %i)",
902 __func__, operand->location.length, sizeof(OutputType));
903 }
904
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100905 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100906 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100907 {
908 return Fail("%s: failed to get address for operand", __func__);
909 }
910
Sadik Armagan813f2302020-05-19 14:10:30 +0100911 if(!optional)
912 {
913 outValue = *(static_cast<const OutputType*>(valueAddress));
914 }
915
arovir01b0717b52018-09-05 17:03:25 +0100916 return true;
917}
918
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100919template<typename HalPolicy,
920 typename HalOperation = typename HalPolicy::Operation,
921 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100922bool GetInputInt32(const HalOperation& operation,
923 uint32_t inputIndex,
924 int32_t& outValue,
925 const HalModel& model,
926 const ConversionData& data)
927{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100928 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100929}
930
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100931template<typename HalPolicy,
932 typename HalOperation = typename HalPolicy::Operation,
933 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100934bool GetInputFloat32(const HalOperation& operation,
935 uint32_t inputIndex,
936 float& outValue,
937 const HalModel& model,
938 const ConversionData& data)
939{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100940 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100941}
942
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100943template<typename HalPolicy,
944 typename HalOperation = typename HalPolicy::Operation,
945 typename HalOperandType = typename HalPolicy::OperandType,
946 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100947bool GetInputActivationFunctionImpl(const HalOperation& operation,
948 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100949 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100950 ActivationFn& outActivationFunction,
951 const HalModel& model,
952 const ConversionData& data)
953{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100954 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100955 {
956 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
957 __func__,
958 toString(type).c_str(),
959 toString(OperandType::INT32).c_str(),
960 toString(OperandType::TENSOR_INT32).c_str());
961 }
962
963 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100964 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100965 {
966 return Fail("%s: failed to get activation input value", __func__);
967 }
968 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
969 return true;
970}
971
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100972template<typename HalPolicy,
973 typename HalOperation = typename HalPolicy::Operation,
974 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100975bool GetInputActivationFunction(const HalOperation& operation,
976 uint32_t inputIndex,
977 ActivationFn& outActivationFunction,
978 const HalModel& model,
979 const ConversionData& data)
980{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981 return GetInputActivationFunctionImpl<HalPolicy>(operation,
982 inputIndex,
983 HalPolicy::OperandType::INT32,
984 outActivationFunction,
985 model,
986 data);
arovir01b0717b52018-09-05 17:03:25 +0100987}
988
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100989template<typename HalPolicy,
990 typename HalOperation = typename HalPolicy::Operation,
991 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100992bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
993 uint32_t inputIndex,
994 ActivationFn& outActivationFunction,
995 const HalModel& model,
996 const ConversionData& data)
997{
998 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100999 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1000 inputIndex,
1001 HalPolicy::OperandType::INT32,
1002 outActivationFunction,
1003 model,
1004 data);
arovir01b0717b52018-09-05 17:03:25 +01001005}
1006
1007
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001008template<typename HalPolicy,
1009 typename HalOperation = typename HalPolicy::Operation,
1010 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001011bool GetOptionalInputActivation(const HalOperation& operation,
1012 uint32_t inputIndex,
1013 ActivationFn& activationFunction,
1014 const HalModel& model,
1015 const ConversionData& data)
1016{
1017 if (operation.inputs.size() <= inputIndex)
1018 {
1019 activationFunction = ActivationFn::kActivationNone;
1020 }
1021 else
1022 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001023 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001024 {
1025 return Fail("%s: Operation has invalid inputs", __func__);
1026 }
1027 }
1028 return true;
1029}
1030
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001031template<typename HalPolicy,
1032 typename ConvolutionDescriptor,
1033 typename HalOperation = typename HalPolicy::Operation,
1034 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001035bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1036 uint32_t dilationXIndex,
1037 ConvolutionDescriptor& descriptor,
1038 const HalModel& model,
1039 const ConversionData& data)
1040{
1041 bool success = true;
1042 if (operation.inputs.size() >= dilationXIndex + 2)
1043 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001044 success &= GetInputScalar<HalPolicy>(operation,
1045 dilationXIndex,
1046 HalPolicy::OperandType::INT32,
1047 descriptor.m_DilationX,
1048 model,
1049 data);
1050 success &= GetInputScalar<HalPolicy>(operation,
1051 dilationXIndex + 1,
1052 HalPolicy::OperandType::INT32,
1053 descriptor.m_DilationY,
1054 model,
1055 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001056 }
1057
1058 return success;
1059}
1060
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001061template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001062 typename HalOperation = typename HalPolicy::Operation,
1063 typename HalModel = typename HalPolicy::Model>
1064bool GetOptionalBool(const HalOperation& operation,
1065 uint32_t inputIndex,
1066 const HalModel& model,
1067 const ConversionData& data)
1068{
1069 using HalOperand = typename HalPolicy::Operand;
1070
1071 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1072 if (!operand)
1073 {
1074 return false;
1075 }
1076
1077 if (!IsBool(*operand))
1078 {
1079 return false;
1080 }
1081
1082 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1083 if (!valueAddress)
1084 {
1085 return false;
1086 }
1087
1088 if (*(static_cast<const bool*>(valueAddress)))
1089 {
1090 return true;
1091 }
1092 else
1093 {
1094 return false;
1095 }
1096}
1097
1098template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001099 typename HalOperand = typename HalPolicy::Operand,
1100 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001101bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001102 std::vector<int32_t>& outValues,
1103 const HalModel& model,
1104 const ConversionData& data)
1105{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001106 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001107 {
1108 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1109 }
1110
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001111 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001112 if (!startAddress)
1113 {
1114 return Fail("%s: failed to get operand address", __func__, operand.type);
1115 }
1116
1117 // Check number of bytes is sensible
1118 const uint32_t numBytes = operand.location.length;
1119 if (numBytes % sizeof(int32_t) != 0)
1120 {
1121 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1122 __func__, numBytes, sizeof(int32_t));
1123 }
1124
1125 outValues.resize(numBytes / sizeof(int32_t));
1126 memcpy(outValues.data(), startAddress, numBytes);
1127 return true;
1128}
1129
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001130template<typename HalPolicy,
1131 typename HalOperation = typename HalPolicy::Operation,
1132 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001133bool GetInputPaddingScheme(const HalOperation& operation,
1134 uint32_t inputIndex,
1135 PaddingScheme& outPaddingScheme,
1136 const HalModel& model,
1137 const ConversionData& data)
1138{
1139 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001140 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001141 {
1142 return Fail("%s: failed to get padding scheme input value", __func__);
1143 }
1144
1145 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1146 return true;
1147}
1148
/// Converts the operation input at @p inputIndex into a LayerInputHandle that can be
/// connected to an ArmNN layer.
///
/// Behaviour depends on the operand lifetime:
///  - MODEL_INPUT: checked for backend support, then (falling through) treated like a
///    temporary/output operand;
///  - TEMPORARY_VARIABLE / MODEL_OUTPUT: resolved to the output slot of the already-converted
///    producer layer;
///  - CONSTANT_COPY / CONSTANT_REFERENCE: materialised as a new ArmNN Constant layer.
/// Dynamic input tensors and other lifetimes are rejected.
///
/// @return A valid handle on success; an invalid (default) LayerInputHandle on failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand can throw UnsupportedOperand, caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1261
Kevin May42477c12020-03-26 13:34:14 +00001262
#ifdef ARMNN_ANDROID_NN_V1_3
/// HAL 1.3 overload of ConvertToLayerInputHandle.
///
/// Differs from the generic template in two ways:
///  - uses the 1.3 lifetimes SUBGRAPH_INPUT / SUBGRAPH_OUTPUT in place of
///    MODEL_INPUT / MODEL_OUTPUT;
///  - tolerates dynamic input tensors whose shape was already inferred by a previously
///    converted producer layer (the inferred info is read back from
///    data.m_OutputSlotForOperand).
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand can throw UnsupportedOperand, caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            if(data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1387
/// Associates output slot @p layerOutputIndex of @p layer with the operation output at
/// @p operationOutputIndex, setting the slot's TensorInfo and recording the slot in
/// data.m_OutputSlotForOperand so later operations can connect to it.
///
/// @param overrideOutputInfo If non-null, used instead of the TensorInfo derived from the
///                           output operand.
/// @param validateFunc       Optional backend-support check; for dynamic output tensors the
///                           shape is first inferred from the connected inputs and then
///                           validated. On validation failure the layer's inputs are
///                           disconnected and false is returned.
/// @return true when the slot was set up and tracked; false on any failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    // Type one dynamic tensors require the previous layer's output shape for inference
    if (!layer.GetInputSlot(0).GetConnection() &&
        IsDynamicTensor(outputSlot.GetTensorInfo()))
    {
        return false;
    }

    bool isSupported = false;
    if (validateFunc &&
        layer.GetInputSlot(0).GetConnection() &&
        IsDynamicTensor(outputSlot.GetTensorInfo()))
    {
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if(!isSupported)
        {
            // Backend rejected the inferred shape: undo the layer's connections before bailing out.
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }

            return false;
        }
    }

    // Track the slot so subsequent operations can resolve this operand as their input.
    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    return true;
}
1452
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001453template<typename HalPolicy,
1454 typename HalOperation = typename HalPolicy::Operation,
1455 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001456armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1457 uint32_t inputIndex,
1458 const HalModel& model,
1459 ConversionData& data)
1460{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001461 using HalOperand = typename HalPolicy::Operand;
1462
1463 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001464 if (!operand)
1465 {
1466 return armnn::DataLayout::NHWC;
1467 }
1468
1469 if (!IsBool(*operand))
1470 {
1471 return armnn::DataLayout::NHWC;
1472 }
1473
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001474 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001475 if (!valueAddress)
1476 {
1477 return armnn::DataLayout::NHWC;
1478 }
1479
1480 if (*(static_cast<const bool*>(valueAddress)))
1481 {
1482 return armnn::DataLayout::NCHW;
1483 }
1484 else
1485 {
1486 return armnn::DataLayout::NHWC;
1487 }
1488}
1489
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001490template<typename HalPolicy,
1491 typename HalOperation = typename HalPolicy::Operation,
1492 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001493bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1494 uint32_t outputIndex,
1495 armnn::IConnectableLayer& layer,
1496 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001497 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001498 const armnn::TensorInfo* overrideOutputInfo = nullptr,
1499 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001500{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001501 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1502 outputIndex,
1503 layer,
1504 outputIndex,
1505 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001506 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001507 overrideOutputInfo,
1508 validateFunc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001509}
1510
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001511template<typename HalPolicy,
1512 typename HalOperation = typename HalPolicy::Operation,
1513 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001514bool ConvertToActivation(const HalOperation& operation,
1515 const char* operationName,
1516 const armnn::ActivationDescriptor& activationDesc,
1517 const HalModel& model,
1518 ConversionData& data)
1519{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001520 using HalOperand = typename HalPolicy::Operand;
1521
1522 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001523 if (!input.IsValid())
1524 {
1525 return Fail("%s: Input 0 is invalid", operationName);
1526 }
1527
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001528 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001529 if (!outputOperand)
1530 {
1531 return false;
1532 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001533
1534 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001535
1536 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001537
1538 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1539 {
1540 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1541 IsActivationSupported,
1542 data.m_Backends,
1543 isSupported,
1544 input.GetTensorInfo(),
1545 outInfo,
1546 activationDesc);
1547 };
1548
1549 if(IsDynamicTensor(outInfo))
1550 {
1551 isSupported = AreDynamicTensorsSupported();
1552 }
1553 else
1554 {
1555 validateFunc(outInfo, isSupported);
1556 }
1557
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001558 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001559 {
1560 return false;
1561 }
1562
1563 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01001564 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001565 input.Connect(layer->GetInputSlot(0));
1566
Finn Williamsa4983ce2020-07-23 12:55:12 +01001567 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001568}
1569
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001570template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001571 typename HalOperation = typename HalPolicy::Operation,
1572 typename HalModel = typename HalPolicy::Model>
1573bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1574{
1575 armnn::ActivationDescriptor desc;
1576 desc.m_Function = armnn::ActivationFunction::ReLu;
1577
1578 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1579}
1580
1581template<typename HalPolicy,
1582 typename HalOperation = typename HalPolicy::Operation,
1583 typename HalModel = typename HalPolicy::Model>
1584bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1585{
1586 armnn::ActivationDescriptor desc;
1587 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1588 desc.m_A = 1.0f;
1589 desc.m_B = -1.0f;
1590
1591 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1592}
1593
1594template<typename HalPolicy,
1595 typename HalOperation = typename HalPolicy::Operation,
1596 typename HalModel = typename HalPolicy::Model>
1597bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1598{
1599 armnn::ActivationDescriptor desc;
1600 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1601 desc.m_A = 6.0f;
1602
1603 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1604}
1605
1606template<typename HalPolicy,
1607 typename HalOperation = typename HalPolicy::Operation,
1608 typename HalModel = typename HalPolicy::Model>
1609bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1610{
1611 armnn::ActivationDescriptor desc;
1612 desc.m_Function = armnn::ActivationFunction::TanH;
1613 desc.m_A = 1.0f; // android nn does not support tanH parameters
1614 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1615
1616 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1617}
1618
1619template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001620 typename HalOperation = typename HalPolicy::Operation,
1621 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001622bool ConvertPaddings(const HalOperation& operation,
1623 const HalModel& model,
1624 ConversionData& data,
1625 unsigned int rank,
1626 armnn::PadDescriptor& padDescriptor)
1627{
1628 using HalOperand = typename HalPolicy::Operand;
1629
1630 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1631 if (!paddingsOperand)
1632 {
1633 return Fail("%s: Could not read paddings operand", __func__);
1634 }
1635
1636 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1637 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1638 {
1639 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1640 }
1641
1642 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001643 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1644 {
1645 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1646 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001647
1648 // add padding for each dimension of input tensor.
1649 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1650 {
1651 int paddingBeforeInput = paddings[i];
1652 int paddingAfterInput = paddings[i + 1];
1653
1654 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1655 {
1656 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1657 }
1658
1659 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1660 }
1661
1662 return true;
1663}
1664
1665template<typename HalPolicy,
1666 typename HalOperation = typename HalPolicy::Operation,
1667 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001668bool ConvertPooling2d(const HalOperation& operation,
1669 const char* operationName,
1670 armnn::PoolingAlgorithm poolType,
1671 const HalModel& model,
1672 ConversionData& data)
1673{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001674 using HalOperand = typename HalPolicy::Operand;
1675 using HalOperandType = typename HalPolicy::OperandType;
1676
1677 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001678 if (!input.IsValid())
1679 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001680 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001681 }
1682
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001683 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001684 if (!output)
1685 {
1686 return Fail("%s: Could not read output 0", __func__);
1687 }
1688
1689 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1690 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1691
arovir01b0717b52018-09-05 17:03:25 +01001692 armnn::Pooling2dDescriptor desc;
1693 desc.m_PoolType = poolType;
1694 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001695 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001696
1697 ActivationFn activation;
1698
Sadik Armagan15d63e22019-07-26 16:59:35 +01001699 auto inputSize = operation.inputs.size();
1700
1701 if (inputSize >= 10)
1702 {
1703 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1704 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1705 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1706 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1707 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1708 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1709 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1710 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1711 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1712 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1713 {
1714 return Fail("%s: Operation has invalid inputs", operationName);
1715 }
1716
Kevin May42477c12020-03-26 13:34:14 +00001717 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001718 {
1719 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1720 }
1721 }
1722 else
arovir01b0717b52018-09-05 17:03:25 +01001723 {
1724 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1725 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001726 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1727 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1728 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1729 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1730 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1731 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001732 {
1733 return Fail("%s: Operation has invalid inputs", operationName);
1734 }
1735
Kevin May42477c12020-03-26 13:34:14 +00001736 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001737 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001738 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001739 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001740
1741 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1742 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1743 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1744
1745 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1746 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001747 }
1748
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001749 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001750
1751 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1752 {
1753 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1754 IsPooling2dSupported,
1755 data.m_Backends,
1756 isSupported,
1757 inputInfo,
1758 outputInfo,
1759 desc);
1760
1761 };
1762
1763 if(IsDynamicTensor(outputInfo))
1764 {
1765 isSupported = AreDynamicTensorsSupported();
1766 }
1767 else
1768 {
1769 validateFunc(outputInfo, isSupported);
1770 }
1771
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001772 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001773 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001774 return false;
arovir01b0717b52018-09-05 17:03:25 +01001775 }
arovir01b0717b52018-09-05 17:03:25 +01001776
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001777 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1778 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001779 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001780 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001781 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001782
1783 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
1784 if (!endLayer)
arovir01b0717b52018-09-05 17:03:25 +01001785 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001786 return Fail("%s: ProcessActivation failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001787 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001788
1789 input.Connect(pooling2dLayer->GetInputSlot(0));
1790
Finn Williamsa4983ce2020-07-23 12:55:12 +01001791 if (!isSupported)
1792 {
1793 return false;
1794 }
1795
1796 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001797}
1798
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001799template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001800 typename HalOperation = typename HalPolicy::Operation,
1801 typename HalModel = typename HalPolicy::Model>
1802bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001803{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001804 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001805
1806 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1807 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1808
1809 if (!input0.IsValid() || !input1.IsValid())
1810 {
1811 return Fail("%s: Operation has invalid inputs", __func__);
1812 }
1813
1814 // The FuseActivation parameter is always the input index 2
1815 // and it should be optional
1816 ActivationFn activationFunction;
1817 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1818 {
1819 return Fail("%s: Operation has invalid inputs", __func__);
1820 }
1821
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001822 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001823 if (!outputOperand)
1824 {
1825 return false;
1826 }
1827
1828 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1829 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1830
1831 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001832
1833 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001834 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1835 {
1836 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1837 IsAdditionSupported,
1838 data.m_Backends,
1839 isSupported,
1840 inputInfo0,
1841 inputInfo1,
1842 outputInfo);
1843 };
1844
1845 if(!IsDynamicTensor(outputInfo))
1846 {
1847 validateFunc(outputInfo, isSupported);
1848 }
1849 else
1850 {
1851 isSupported = AreDynamicTensorsSupported();
1852 }
1853
Mike Kelly46272802019-08-14 17:00:48 +01001854 if (!isSupported)
1855 {
1856 return false;
1857 }
1858
1859 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1860 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1861
1862 if (endLayer != nullptr)
1863 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001864 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001865 if (!isReshapeSupported)
1866 {
1867 return false;
1868 }
1869
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001870 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01001871 }
1872 else
1873 {
1874 return Fail("%s: ProcessActivation failed", __func__);
1875 }
1876}
1877
1878template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001879 typename HalOperation = typename HalPolicy::Operation,
1880 typename HalModel = typename HalPolicy::Model>
1881bool ConvertArgMinMax(const HalOperation& operation,
1882 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001883 ConversionData& data,
1884 armnn::ArgMinMaxFunction argMinMaxFunction)
1885{
1886 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1887
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001888 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001889 using HalOperandType = typename HalPolicy::OperandType;
1890
1891 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1892
1893 if (!input0.IsValid())
1894 {
1895 return Fail("%s: Operation has invalid inputs", __func__);
1896 }
1897
1898 int32_t axis;
1899 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1900 {
1901 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1902 }
1903
1904 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1905 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1906
1907 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1908 {
1909 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1910 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1911 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1912 return Fail("%s: Axis must be in range [-n, n)", __func__);
1913 }
1914
1915 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1916 if (!output)
1917 {
1918 return Fail("%s: Could not read output 0", __func__);
1919 }
1920
1921 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1922
1923 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001924
1925 armnn::ArgMinMaxDescriptor descriptor;
1926 descriptor.m_Function = argMinMaxFunction;
1927 descriptor.m_Axis = axis;
1928
1929 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001930
1931 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1932 {
1933 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1934 IsArgMinMaxSupported,
1935 data.m_Backends,
1936 isSupported,
1937 inputInfo0,
1938 outputInfo,
1939 descriptor);
1940 };
1941
1942 if(IsDynamicTensor(outputInfo))
1943 {
1944 isSupported = AreDynamicTensorsSupported();
1945 }
1946 else
1947 {
1948 validateFunc(outputInfo, isSupported);
1949 }
1950
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001951 if (!isSupported)
1952 {
1953 return false;
1954 }
1955
1956 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1957 assert(layer != nullptr);
1958
1959 input0.Connect(layer->GetInputSlot(0));
1960
Finn Williamsa4983ce2020-07-23 12:55:12 +01001961 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001962}
1963
1964template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001965 typename HalOperation = typename HalPolicy::Operation,
1966 typename HalModel = typename HalPolicy::Model>
1967bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001968{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001969 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001970 using HalOperandType = typename HalPolicy::OperandType;
1971
1972 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1973 if (operation.inputs.size() <= 1)
1974 {
1975 return Fail("%s: Operation has insufficient arguments", __func__);
1976 }
1977
1978 // Get inputs and outputs
1979 const std::size_t numInputTensors = operation.inputs.size() - 1;
1980
1981 int32_t concatDim;
1982 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1983 {
1984 return Fail("%s: Operation has invalid inputs", __func__);
1985 }
1986
1987 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1988 if (!outputOperand)
1989 {
1990 return Fail("%s: Operation has no outputs", __func__);
1991 }
1992
Mike Kellyb8805202019-07-31 17:25:43 +01001993 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
1994 armnn::TensorShape outputShape = outputInfo.GetShape();
1995
1996 //
1997 // handle negative concat dims along the lines of tensorflow as described here:
1998 // https://www.tensorflow.org/api_docs/python/tf/concat
1999 // "negative axis refers to axis + rank(values)-th dimension"
2000 //
2001 if (concatDim < 0)
2002 {
2003 concatDim += outputShape.GetNumDimensions();
2004 }
2005
2006 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2007 {
2008 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2009 }
2010
2011 std::vector<LayerInputHandle> inputHandles;
2012 std::vector<armnn::TensorShape> inputShapes;
2013
2014 inputHandles.reserve(numInputTensors);
2015 inputShapes.reserve(numInputTensors);
2016
2017 bool inputsHaveBeenReshaped = false;
2018 unsigned int tensorDimensionsAdded = 0;
2019
2020 for (uint32_t i = 0; i < numInputTensors; ++i)
2021 {
2022 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2023 if (!operand)
2024 {
2025 return Fail("%s: Operation has invalid inputs", __func__);
2026 }
2027
Teresa Charlin3b959602019-10-31 17:05:47 +00002028 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2029 if (!operandInputHandle.IsValid())
2030 {
2031 return Fail("%s: Operation has invalid inputs", __func__);
2032 }
Mike Kellyb8805202019-07-31 17:25:43 +01002033
Teresa Charlin3b959602019-10-31 17:05:47 +00002034 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002035 if (operandShape.GetNumDimensions() == 0)
2036 {
2037 return Fail("%s: Operands with rank 0 are not supported", __func__);
2038 }
2039
2040 if (RequiresReshape(operandShape))
2041 {
2042 inputsHaveBeenReshaped = true;
2043
2044 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2045
2046 // Expand the tensor to three dimensions
2047 if (operandShape.GetNumDimensions() == 2)
2048 {
2049 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2050 tensorDimensionsAdded = 1;
2051 }
2052 else
2053 {
2054 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2055 tensorDimensionsAdded = 2;
2056 }
2057
Kevin Mayaed08ac2019-12-12 16:33:31 +00002058 armnn::ReshapeDescriptor reshapeDescriptor;
2059 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2060
2061 bool isSupported = false;
2062 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2063 IsReshapeSupported,
2064 data.m_Backends,
2065 isSupported,
2066 operandInputHandle.GetTensorInfo(),
2067 reshapeInfo,
2068 reshapeDescriptor);
2069 if (!isSupported)
2070 {
2071 return false;
2072 }
2073
Mike Kellyb8805202019-07-31 17:25:43 +01002074 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
2075 *data.m_Network,
2076 operandInputHandle,
2077 reshapeInfo
2078 );
2079
2080 // Point to the reshape operation rather then the input operation
2081 operandShape = reshapeInfo.GetShape();
2082 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2083 }
2084
2085 inputShapes.emplace_back(operandShape);
2086 inputHandles.emplace_back(operandInputHandle);
2087
2088 if (!inputHandles.back().IsValid())
2089 {
2090 return Fail("%s: Operation has invalid inputs", __func__);
2091 }
2092 }
2093
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002094 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
Mike Kellyb8805202019-07-31 17:25:43 +01002095
2096 if (inputsHaveBeenReshaped)
2097 {
2098 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2099 concatDim += tensorDimensionsAdded;
2100
2101 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2102 if (tensorDimensionsAdded == 1)
2103 {
2104 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2105 }
2106 else if (tensorDimensionsAdded == 2)
2107 {
2108 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2109 }
2110 }
2111
2112 // Check if permutations is required and get the pair of permutations required for the concatenation.
2113 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2114 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
2115 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
2116
2117 bool needPermute =
2118 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
2119
2120 if (needPermute)
2121 {
Mike Kelly4a956582020-02-28 10:32:09 +00002122 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
Mike Kellyb8805202019-07-31 17:25:43 +01002123 }
2124
2125 outputInfo.SetShape(outputShape);
2126
2127 // this is no-op for identity swizzles, otherwise it replaces both
2128 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002129 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002130 {
2131 return false;
2132 }
Mike Kellyb8805202019-07-31 17:25:43 +01002133
2134 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2135 armnn::OriginsDescriptor concatDescriptor;
2136
2137 try
2138 {
2139 // The concat descriptor is always created across the only supported concat dimension
2140 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2141 concatDescriptor =
2142 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
2143 }
Derek Lambertib9cb8442019-11-28 13:34:48 +00002144 catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002145 {
2146 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2147 }
2148
2149 // Validate the output shape is correct given the input shapes based on the
2150 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2151 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2152 {
2153 return Fail("%s: Error validating the output shape for concat", __func__);
2154 }
2155
2156 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2157 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
2158 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
2159
2160 bool isSupported = false;
2161 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2162 IsConcatSupported,
2163 data.m_Backends,
2164 isSupported,
2165 inputTensorInfos,
2166 outputInfo,
2167 concatDescriptor);
2168 if (!isSupported)
2169 {
2170 return false;
2171 }
2172
2173 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2174 assert(layer != nullptr);
2175 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2176
2177 // Connect inputs to the layer
2178 const int numInputSlots = layer->GetNumInputSlots();
2179 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2180 for (int i = 0; i < numInputSlots; ++i)
2181 {
2182 // connect the input directly to the merge (concat) layer
2183 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2184 }
2185
2186 if (needPermute)
2187 {
Mike Kelly4a956582020-02-28 10:32:09 +00002188 armnn::TransposeDescriptor transposeDesc;
2189 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002190 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2191 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2192 permutationPair.second);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002193
2194 bool isSupported = false;
2195 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002196 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002197 data.m_Backends,
2198 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002199 inputTransposeInfo,
2200 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002201 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002202 if (!isSupported)
2203 {
2204 return false;
2205 }
Mike Kellyb8805202019-07-31 17:25:43 +01002206 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Mike Kelly4a956582020-02-28 10:32:09 +00002207 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
2208 layer->GetOutputSlot(0),
2209 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002210 layer = &deswizzleLayer;
2211 }
2212
2213 if (inputsHaveBeenReshaped)
2214 {
2215 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2216
2217 // Undo the reshape knowing the amount of dimensions added
2218 if (tensorDimensionsAdded == 1)
2219 {
2220 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
2221 afterConcatInfo.GetShape()[2] }));
2222 }
2223 else if (tensorDimensionsAdded == 2)
2224 {
2225 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
2226 }
2227
Kevin Mayaed08ac2019-12-12 16:33:31 +00002228 armnn::ReshapeDescriptor reshapeDescriptor;
2229 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
2230
2231 bool isSupported = false;
2232 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2233 IsReshapeSupported,
2234 data.m_Backends,
2235 isSupported,
2236 layer->GetOutputSlot(0).GetTensorInfo(),
2237 afterConcatInfo,
2238 reshapeDescriptor);
2239 if (!isSupported)
2240 {
2241 return false;
2242 }
2243
Mike Kellyb8805202019-07-31 17:25:43 +01002244 layer = &AddReshapeLayer(
2245 *data.m_Network,
2246 layer->GetOutputSlot(0),
2247 afterConcatInfo
2248 );
2249 }
2250
2251 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2252}
2253
// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer.
//
// Two HAL input layouts are handled:
//   - 10 inputs: explicit padding (left/right/top/bottom), strides, fused activation
//   -  7 inputs: implicit padding scheme, strides, fused activation
//
// Returns true and registers the output slot on success; returns false (via Fail)
// for invalid inputs/outputs or an unsupported configuration.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Bias scale must match weightScale * inputScale for quantized types; adjust if it drifted.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding variant: inputs 3-6 are the four pad values.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding variant: input 3 is a padding scheme; compute pads from shapes.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights indexed as [ O, H, W, I ] and input as NHWC — index 1 is height, 2 is width.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    // Deferred so it can be re-run once a dynamic output shape has been inferred.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // May append an activation layer after the convolution when a fused activation was requested.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
}
2381
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002382template<typename HalPolicy,
2383 typename HalOperation = typename HalPolicy::Operation,
2384 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002385bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2386{
2387 using HalOperand = typename HalPolicy::Operand;
2388 using HalOperandType = typename HalPolicy::OperandType;
2389
2390 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2391 if (!input.IsValid() )
2392 {
2393 return Fail("%s: Operation has invalid inputs", __func__);
2394 }
2395
2396 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2397 unsigned int rank = inputInfo.GetNumDimensions();
2398 if (rank != 4)
2399 {
2400 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2401 }
2402
2403 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2404 if (!output)
2405 {
2406 return Fail("%s: Could not read output 0", __func__);
2407 }
2408
2409 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002410
2411 armnn::DepthToSpaceDescriptor descriptor;
2412
2413 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2414 if (descriptor.m_BlockSize <= 1)
2415 {
2416 return Fail("%s: Block size must be at least 1 in all dimensions");
2417 }
2418
2419 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002420 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002421 {
2422 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2423 }
2424
2425 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002426 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2427 {
2428 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2429 IsDepthToSpaceSupported,
2430 data.m_Backends,
2431 isSupported,
2432 inputInfo,
2433 outputInfo,
2434 descriptor);
2435 };
2436
2437 if(!IsDynamicTensor(outputInfo))
2438 {
2439 validateFunc(outputInfo, isSupported);
2440 }
2441 else
2442 {
2443 isSupported = AreDynamicTensorsSupported();
2444 }
2445
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002446 if (!isSupported)
2447 {
2448 return false;
2449 }
2450
2451 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2452 assert(layer != nullptr);
2453 input.Connect(layer->GetInputSlot(0));
2454
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002455 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002456}
2457
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer.
//
// Two HAL input layouts are handled:
//   - 11 inputs: explicit padding, strides, depth multiplier (idx 9, unused here),
//                fused activation at idx 10
//   -  8 inputs: implicit padding scheme, strides, depth multiplier (idx 6, unused
//                here), fused activation at idx 7
//
// Returns true and registers the output slot on success; false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I comes from the NHWC input's channel dim; M is the depth multiplier I*M / I.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Bias scale must match weightScale * inputScale for quantized types; adjust if it drifted.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding variant: inputs 3-6 are the four pad values.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding variant: input 3 is a padding scheme; compute pads from shapes.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights were swizzled to [ M, I, H, W ] above — index 2 is height, 3 is width.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    // Deferred so it can be re-run once a dynamic output shape has been inferred.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }


    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // May append an activation layer after the convolution when a fused activation was requested.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
}
2611
Mike Kelly3c673942019-07-25 09:26:06 +01002612template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002613 typename HalOperation = typename HalPolicy::Operation,
2614 typename HalModel = typename HalPolicy::Model>
2615bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002616{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002617 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002618
2619 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2620 if (!input.IsValid())
2621 {
2622 return Fail("%s: Operation has invalid input", __func__);
2623 }
2624
Sadik Armagan98c0f662019-11-21 15:54:36 +00002625 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2626 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2627 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2628 {
2629 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2630 }
2631
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002632 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002633 if (!outputOperand)
2634 {
2635 return Fail("%s: Operation has invalid outputs", __func__);
2636 }
2637
2638 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002639
2640 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002641 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2642 {
2643 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2644 IsDequantizeSupported,
2645 data.m_Backends,
2646 isSupported,
2647 inputInfo,
2648 outputInfo);
2649 };
2650
2651 if(IsDynamicTensor(outputInfo))
2652 {
2653 isSupported = AreDynamicTensorsSupported();
2654 }
2655 else
2656 {
2657 validateFunc(outputInfo, isSupported);
2658 }
2659
Mike Kelly46272802019-08-14 17:00:48 +01002660 if (!isSupported)
2661 {
2662 return false;
2663 }
2664
2665 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2666 assert(layer != nullptr);
2667 input.Connect(layer->GetInputSlot(0));
2668
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002669 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002670}
2671
2672template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002673 typename HalOperation = typename HalPolicy::Operation,
2674 typename HalModel = typename HalPolicy::Model>
2675bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002676{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002677 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002678
2679 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2680 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2681
2682 if (!input0.IsValid() || !input1.IsValid())
2683 {
2684 return Fail("%s: Operation has invalid inputs", __func__);
2685 }
2686
2687 // The FuseActivation parameter is always the input index 2
2688 // and it should be optional
2689 ActivationFn activationFunction;
2690 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2691 {
2692 return Fail("%s: Operation has invalid inputs", __func__);
2693 }
2694
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002695 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002696 if (!output)
2697 {
2698 return Fail("%s: Could not read output 0", __func__);
2699 }
2700
2701 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002702
2703 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002704 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2705 {
2706 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2707 IsDivisionSupported,
2708 data.m_Backends,
2709 isSupported,
2710 input0.GetTensorInfo(),
2711 input1.GetTensorInfo(),
2712 outputInfo);
2713 };
2714
2715 if(!IsDynamicTensor(outputInfo))
2716 {
2717 validateFunc(outputInfo, isSupported);
2718 }
2719 else
2720 {
2721 isSupported = AreDynamicTensorsSupported();
2722 }
2723
Mike Kelly46272802019-08-14 17:00:48 +01002724 if (!isSupported)
2725 {
2726 return false;
2727 }
2728
2729 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2730 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2731
2732 if (endLayer)
2733 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002734 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002735 if (!isReshapeSupported)
2736 {
2737 return false;
2738 }
2739
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002740 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002741 }
2742 return Fail("%s: ProcessActivation failed", __func__);
2743}
2744
2745template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002746 typename HalOperation = typename HalPolicy::Operation,
2747 typename HalModel = typename HalPolicy::Model>
2748bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002749{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002750 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002751
2752 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2753 if (!input.IsValid())
2754 {
2755 return Fail("%s: Operation has invalid inputs", __func__);
2756 }
2757
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002758 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002759 if (!outputOperand)
2760 {
2761 return Fail("%s: Operation has invalid outputs", __func__);
2762 }
2763
2764 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002765
2766 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002767 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2768 {
2769 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2770 IsFloorSupported,
2771 data.m_Backends,
2772 isSupported,
2773 input.GetTensorInfo(),
2774 outputInfo);
2775 };
2776
2777 if(!IsDynamicTensor(outputInfo))
2778 {
2779 validateFunc(outputInfo, isSupported);
2780 }
2781 else
2782 {
2783 isSupported = AreDynamicTensorsSupported();
2784 }
2785
Mike Kelly46272802019-08-14 17:00:48 +01002786 if (!isSupported)
2787 {
2788 return false;
2789 }
2790
2791 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2792 assert(layer != nullptr);
2793 input.Connect(layer->GetInputSlot(0));
2794
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002795 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002796}
2797
// HAL 1.0 has no 8-bit symmetric quantized tensor type, so a V1_0 operand
// can never be QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2802
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Returns true if the HAL 1.2 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2811
#ifdef ARMNN_ANDROID_NN_V1_3

// Returns true if the HAL 1.3 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2820
// Outcome of DequantizeIfRequired: the operand was dequantized, needed no
// dequantization (already constant), or could not be read at all.
enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

// (dequantized buffer, buffer size in bytes, tensor info for the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2829
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002830template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002831 typename HalOperation = typename HalPolicy::Operation,
2832 typename HalModel = typename HalPolicy::Model>
2833DequantizeResult DequantizeIfRequired(size_t operand_index,
2834 const HalOperation& operation,
2835 const HalModel& model,
2836 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002837{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002838 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002839
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002840 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002841 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002842 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002843 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002844 }
2845
2846 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2847 {
2848 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002849 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002850 }
2851
2852 const size_t weightsInputIndex = operation.inputs[operand_index];
2853
2854 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2855 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002856 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002857 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002858 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002859 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002860 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2861 {
2862 continue;
2863 }
2864
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002865 size_t outOpIndex = weightsInputIndex + 1;
2866 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002867 {
2868 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002869 }
2870
2871 if (outOpIndex != weightsInputIndex)
2872 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002873 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002874 }
2875
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002876 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002877 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002878
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002879 if (!IsQSymm8(*operand))
2880 {
2881 // Only supporting dequantize from QSYMM8 to FLOAT
2882 break;
2883 }
2884
2885 // Allocate a new buffer for the dequantized data and manually dequantize
2886 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2887 if (!startValue)
2888 {
2889 // Failed to get the operand address
2890 break;
2891 }
2892
2893 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2894 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002895 const float quantizationScale = operand->scale;
2896
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002897 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2898 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2899 {
2900 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002901 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002902 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2903 }
2904
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002905 // Construct tensor info for dequantized ConstTensor
2906 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2907 operand->dimensions.data(),
2908 armnn::DataType::Float32);
2909
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002910 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2911 std::move(tensorInfo),
2912 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002913 }
2914
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002915 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002916}
2917
2918template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002919 typename HalOperation = typename HalPolicy::Operation,
2920 typename HalModel = typename HalPolicy::Model>
2921ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2922 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002923 const ConversionData& data,
2924 size_t operandIndex,
2925 bool optional = false)
2926{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002927 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2928
2929 DequantizeStatus status = std::get<3>(dequantized);
2930 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002931 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002932 case DequantizeStatus::INVALID_OPERAND:
2933 {
2934 // return invalid const tensor pin
2935 return ConstTensorPin();
2936 }
2937 case DequantizeStatus::NOT_REQUIRED:
2938 {
2939 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2940 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2941 }
2942 case DequantizeStatus::SUCCESS:
2943 default:
2944 {
2945 return ConstTensorPin(
2946 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2947 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002948 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002949}
2950
2951
Mike Kelly46272802019-08-14 17:00:48 +01002952template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002953 typename HalOperation = typename HalPolicy::Operation,
2954 typename HalModel = typename HalPolicy::Model>
2955bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002956{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002957 using HalOperand = typename HalPolicy::Operand;
2958
Mike Kelly46272802019-08-14 17:00:48 +01002959 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2960 if (!input.IsValid())
2961 {
2962 return Fail("%s: Operation has invalid inputs", __func__);
2963 }
2964
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002965 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002966 if (!output)
2967 {
2968 return Fail("%s: Could not read output 0", __func__);
2969 }
2970
2971 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2972 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2973
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002974 ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
2975 ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002976
2977 if (!weightsPin.IsValid())
Mike Kelly46272802019-08-14 17:00:48 +01002978 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002979 return Fail("%s: Operation has invalid weights", __func__);
2980 }
2981
2982 if (!biasPin.IsValid())
2983 {
2984 return Fail("%s: Operation has invalid bias", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01002985 }
2986
2987 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2988 armnn::ConstTensor bias = biasPin.GetConstTensor();
2989 armnn::TensorInfo reshapedInfo = inputInfo;
2990
2991 try
2992 {
2993 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002994 }
2995 catch (const std::exception& e)
2996 {
Mike Kelly46272802019-08-14 17:00:48 +01002997 return Fail("%s: %s", __func__, e.what());
2998 }
2999
3000 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
3001 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
3002
3003 ActivationFn activationFunction;
3004 if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
3005 {
3006 return Fail("%s: Operation has invalid inputs", __func__);
3007 }
3008
3009 armnn::FullyConnectedDescriptor desc;
3010 desc.m_TransposeWeightMatrix = true;
3011 desc.m_BiasEnabled = true;
3012
FinnWilliamsArm7b8d2e62020-01-08 14:57:47 +00003013 if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
3014 weights.GetInfo().GetShape(),
3015 outputInfo.GetShape(),
3016 desc.m_TransposeWeightMatrix))
3017 {
3018 return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
3019 }
3020
Mike Kelly46272802019-08-14 17:00:48 +01003021 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003022 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3023 {
3024 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly46272802019-08-14 17:00:48 +01003025 IsFullyConnectedSupported,
3026 data.m_Backends,
3027 isSupported,
3028 reshapedInfo,
3029 outputInfo,
3030 weights.GetInfo(),
3031 bias.GetInfo(),
3032 desc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003033 };
3034
3035 if(!IsDynamicTensor(outputInfo))
3036 {
3037 validateFunc(outputInfo, isSupported);
3038 }
3039 else
3040 {
3041 isSupported = AreDynamicTensorsSupported();
3042 }
3043
Mike Kelly46272802019-08-14 17:00:48 +01003044 if (!isSupported)
3045 {
3046 return false;
3047 }
3048
3049 armnn::IConnectableLayer* startLayer =
3050 data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
3051 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3052
3053 if (endLayer != nullptr)
3054 {
3055 if (inputInfo.GetNumDimensions() > 2U)
3056 {
3057 armnn::ReshapeDescriptor reshapeDescriptor;
3058 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
3059
3060 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3061 assert(reshapeLayer != nullptr);
3062 input.Connect(reshapeLayer->GetInputSlot(0));
3063 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
3064 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
3065 }
3066 else
3067 {
3068 input.Connect(startLayer->GetInputSlot(0));
3069 }
3070
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003071 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003072 }
3073 else
3074 {
3075 return Fail("%s: ProcessActivation failed", __func__);
3076 }
3077}
3078
3079template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003080 typename HalOperation = typename HalPolicy::Operation,
3081 typename HalModel = typename HalPolicy::Model>
3082bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003083{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003084 using HalOperand = typename HalPolicy::Operand;
3085
Mike Kelly999e2092019-08-15 10:46:46 +01003086 if (operation.inputs.size() != 1)
3087 {
3088 return Fail("%s: Optional inputs are not supported", __func__);
3089 }
3090
Mike Kelly46272802019-08-14 17:00:48 +01003091 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3092 if (!input.IsValid())
3093 {
3094 return Fail("%s: Operation has invalid inputs", __func__);
3095 }
3096
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003097 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003098 if (!output)
3099 {
3100 return Fail("%s: Could not read output 0", __func__);
3101 }
3102
3103 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3104 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3105
Mike Kelly46272802019-08-14 17:00:48 +01003106 if (outputInfo.GetNumDimensions() != 4u)
3107 {
3108 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3109 }
3110
3111 armnn::L2NormalizationDescriptor desc;
3112 desc.m_DataLayout = armnn::DataLayout::NHWC;
3113
3114 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003115 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3116 {
3117 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3118 IsL2NormalizationSupported,
3119 data.m_Backends,
3120 isSupported,
3121 inputInfo,
3122 outputInfo,
3123 desc);
3124 };
3125
3126 if(!IsDynamicTensor(outputInfo))
3127 {
3128 validateFunc(outputInfo, isSupported);
3129 }
3130 else
3131 {
3132 isSupported = AreDynamicTensorsSupported();
3133 }
3134
Mike Kelly46272802019-08-14 17:00:48 +01003135 if (!isSupported)
3136 {
3137 return false;
3138 }
3139
3140 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3141 assert(layer != nullptr);
3142 input.Connect(layer->GetInputSlot(0));
3143
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003144 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003145}
3146
3147template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003148 typename HalOperation = typename HalPolicy::Operation,
3149 typename HalModel = typename HalPolicy::Model>
3150bool ConvertLocalResponseNormalization(const HalOperation& operation,
3151 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003152 ConversionData& data)
3153{
Mike Kelly999e2092019-08-15 10:46:46 +01003154 if (operation.inputs.size() != 5)
3155 {
3156 return Fail("%s: Optional inputs are not supported", __func__);
3157 }
3158
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003159 using HalOperand = typename HalPolicy::Operand;
3160 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003161
3162 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3163 if (!input.IsValid())
3164 {
3165 return Fail("%s: Operation has invalid inputs", __func__);
3166 }
3167
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003168 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003169 if (!output)
3170 {
3171 return Fail("%s: Could not read output 0", __func__);
3172 }
3173
3174 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3175 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3176
Mike Kelly46272802019-08-14 17:00:48 +01003177 if (outputInfo.GetNumDimensions() != 4u)
3178 {
3179 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3180 }
3181
3182 armnn::NormalizationDescriptor descriptor;
3183 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3184 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3185 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3186
3187 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003188 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003189 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3190 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3191 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3192 {
3193 return Fail("%s: Operation has invalid inputs", __func__);
3194 }
3195
3196 // ArmNN expects normSize to be the full size of the normalization
3197 // window rather than the radius as in AndroidNN.
3198 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3199
3200 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003201 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3202 {
3203 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3204 IsNormalizationSupported,
3205 data.m_Backends,
3206 isSupported,
3207 inputInfo,
3208 outputInfo,
3209 descriptor);
3210 };
3211
3212 if(!IsDynamicTensor(outputInfo))
3213 {
3214 validateFunc(outputInfo, isSupported);
3215 }
3216 else
3217 {
3218 isSupported = AreDynamicTensorsSupported();
3219 }
3220
Mike Kelly46272802019-08-14 17:00:48 +01003221 if (!isSupported)
3222 {
3223 return false;
3224 }
3225
3226
3227 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3228 assert(layer != nullptr);
3229 input.Connect(layer->GetInputSlot(0));
3230
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003231 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003232}
3233
3234template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003235 typename HalOperation = typename HalPolicy::Operation,
3236 typename HalModel = typename HalPolicy::Model>
3237bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003238{
Mike Kelly46272802019-08-14 17:00:48 +01003239 armnn::ActivationDescriptor desc;
3240 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3241
3242 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3243}
3244
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL MEAN operation into an ArmNN MeanLayer.
// Inputs: 0 = tensor to reduce, 1 = axis tensor (int32), 2 = keep-dims flag (int32).
// Returns false (via Fail) on invalid inputs or when no backend supports the layer.
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Input 1 is the tensor of reduction axes.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axis values into [0, rank); the std::set
    // also leaves the axes sorted ascending — assumes -rank <= i < rank
    // (values outside that range would wrap via unsigned arithmetic).
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0; // any positive value enables keep-dims

    bool isSupported = false;
    // Deferred backend-support check; re-run later for dynamic output shapes.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3330
3331template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003332 typename HalOperation = typename HalPolicy::Operation,
3333 typename HalModel = typename HalPolicy::Model>
3334bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003335{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003336 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003337
3338 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3339 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3340
3341 if (!input0.IsValid() || !input1.IsValid())
3342 {
3343 return Fail("%s: Operation has invalid inputs", __func__);
3344 }
3345
3346 // The FuseActivation parameter is always the input index 2
3347 // and it should be optional
3348 ActivationFn activationFunction;
3349 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3350 {
3351 return Fail("%s: Operation has invalid inputs", __func__);
3352 }
3353
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003354 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003355
3356 if (outputOperand == nullptr)
3357 {
3358 return false;
3359 }
3360
3361 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003362
3363 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003364 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3365 {
3366 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3367 IsMultiplicationSupported,
3368 data.m_Backends,
3369 isSupported,
3370 input0.GetTensorInfo(),
3371 input1.GetTensorInfo(),
3372 outputInfo);
3373 };
3374
3375 if(!IsDynamicTensor(outputInfo))
3376 {
3377 validateFunc(outputInfo, isSupported);
3378 }
3379 else
3380 {
3381 isSupported = AreDynamicTensorsSupported();
3382 }
3383
Mike Kelly46272802019-08-14 17:00:48 +01003384 if (!isSupported)
3385 {
3386 return false;
3387 }
3388
3389 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3390 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3391
3392 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3393 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3394
3395 if (endLayer != nullptr)
3396 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003397 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003398 if (!isReshapeSupported)
3399 {
3400 return false;
3401 }
3402
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003403 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003404 }
3405 else
3406 {
3407 return Fail("%s: ProcessActivation failed", __func__);
3408 }
3409}
3410
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL PAD operation into an ArmNN PadLayer.
// Inputs: 0 = tensor to pad, 1 = paddings (consumed by ConvertPaddings).
// NOTE(review): `operation` is taken by non-const reference, unlike the other
// Convert* functions here — looks like an oversight, but changing the
// signature would affect callers; confirm before tightening.
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // ConvertPaddings fills descriptor.m_PadList from input 1, validated
    // against the input tensor's rank.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
    // the scale and zeroPoint must be the same as input0
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    // Deferred backend-support check; re-run later for dynamic output shapes.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3483
Mike Kelly0a879362019-07-29 16:56:31 +01003484template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003485 typename HalOperation = typename HalPolicy::Operation,
3486 typename HalModel = typename HalPolicy::Model>
3487bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003488{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003489 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003490
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003491 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3492 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3493 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003494
3495 if (inputOperand == nullptr
3496 || requestedShapeOperand == nullptr
3497 || outputOperand == nullptr)
3498 {
3499 return Fail("%s: Operation has invalid inputs", __func__);
3500 }
3501
3502 if (requestedShapeOperand->dimensions.size() != 1)
3503 {
3504 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3505 __func__, requestedShapeOperand->dimensions.size());
3506 }
3507
3508 std::vector<int32_t> targetDimensions;
3509 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3510 {
3511 return Fail("%s: Could not read values of input 1", __func__);
3512 }
3513
3514 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3515
3516 Shape requestedShape;
3517 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3518 // function that resolves these values into a fully specified tensor shape.
3519 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3520 {
3521 return Fail("%s: Failed to resolve the requested shape", __func__);
3522 }
3523
Mike Kelly46272802019-08-14 17:00:48 +01003524 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3525 if (!input.IsValid())
3526 {
3527 return Fail("%s: Could not read input 0", __func__);
3528 }
3529
3530 armnn::ReshapeDescriptor reshapeDescriptor;
3531 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3532 requestedShape.dimensions.data());
3533
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003534 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3535
Mike Kelly46272802019-08-14 17:00:48 +01003536 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003537 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3538 {
3539 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3540 IsReshapeSupported,
3541 data.m_Backends,
3542 isSupported,
3543 input.GetTensorInfo(),
3544 outputInfo,
3545 reshapeDescriptor);
3546 };
3547
3548 if(!IsDynamicTensor(outputInfo))
3549 {
3550 validateFunc(outputInfo, isSupported);
3551 }
3552 else
3553 {
3554 isSupported = AreDynamicTensorsSupported();
3555 }
3556
Mike Kelly46272802019-08-14 17:00:48 +01003557 if (!isSupported)
3558 {
3559 return false;
3560 }
3561
3562 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3563 assert(layer != nullptr);
3564 input.Connect(layer->GetInputSlot(0));
3565
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003566 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003567}
3568
3569template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003570 typename HalOperation = typename HalPolicy::Operation,
3571 typename HalModel = typename HalPolicy::Model>
3572bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003573{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003574 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003575
Mike Kelly0a879362019-07-29 16:56:31 +01003576 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3577 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3578
3579 if (!input0.IsValid() || !input1.IsValid())
3580 {
3581 return Fail("%s: Operation has invalid inputs", __func__);
3582 }
3583
3584 // The FuseActivation parameter is always the input index 2
3585 // and it should be optional
3586 ActivationFn activationFunction;
3587 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3588 {
3589 return Fail("%s: Operation has invalid inputs", __func__);
3590 }
3591
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003592 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003593 if (!output)
3594 {
3595 return Fail("%s: Could not read output 0", __func__);
3596 }
3597
3598 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003599
3600 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003601 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3602 {
3603 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3604 IsSubtractionSupported,
3605 data.m_Backends,
3606 isSupported,
3607 input0.GetTensorInfo(),
3608 input1.GetTensorInfo(),
3609 outputInfo);
3610 };
3611
3612 if(IsDynamicTensor(outputInfo))
3613 {
3614 isSupported = AreDynamicTensorsSupported();
3615 }
3616 else
3617 {
3618 validateFunc(outputInfo, isSupported);
3619 }
3620
Mike Kelly0a879362019-07-29 16:56:31 +01003621 if (!isSupported)
3622 {
3623 return false;
3624 }
3625
3626 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3627 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3628
3629 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3630 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3631
3632 if (endLayer)
3633 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003634 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003635 if (!isReshapeSupported)
3636 {
3637 return false;
3638 }
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003639 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly0a879362019-07-29 16:56:31 +01003640 }
3641
3642 return Fail("%s: ProcessActivation failed", __func__);
3643}
3644
Finn Williams23b87b32019-07-30 11:44:05 +01003645template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003646 typename HalOperation = typename HalPolicy::Operation,
3647 typename HalModel = typename HalPolicy::Model>
3648bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003649{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003650 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003651
3652 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3653 if (!input.IsValid())
3654 {
3655 return Fail("%s: Operation has invalid inputs", __func__);
3656 }
3657
3658 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3659 unsigned int rank = inputInfo.GetNumDimensions();
3660 if (rank > 4)
3661 {
3662 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3663 }
3664
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003665 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003666 if (!output)
3667 {
3668 return Fail("%s: Could not read output 0", __func__);
3669 }
Mike Kelly46272802019-08-14 17:00:48 +01003670 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3671 {
3672 return Fail("%s: Dynamic output tensors are not supported", __func__);
3673 }
3674
3675 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3676 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003677 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003678
3679 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3680
3681 std::vector<int32_t> axis;
3682 if (!axisOperand)
3683 {
3684 axis.assign(dimensionSequence,
3685 dimensionSequence + rank);
3686 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003687 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003688 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003689 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003690 }
3691
3692 std::vector<uint32_t> outputDims;
3693 for (unsigned int i = 0; i < rank; i++)
3694 {
3695 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3696 auto currentDimension = inputInfo.GetShape()[i];
3697 if (skipSqueeze || currentDimension != 1)
3698 {
3699 outputDims.push_back(currentDimension);
3700 }
3701 }
3702
3703 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3704
3705 armnn::TensorInfo outputInfo = inputInfo;
3706 outputInfo.SetShape(outShape);
3707
3708 armnn::ReshapeDescriptor reshapeDesc;
3709 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3710
3711 bool isSupported = false;
3712 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3713 IsReshapeSupported,
3714 data.m_Backends,
3715 isSupported,
3716 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003717 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003718 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003719
Mike Kelly46272802019-08-14 17:00:48 +01003720 if (!isSupported)
3721 {
3722 return false;
3723 }
3724
3725 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3726 assert(layer != nullptr);
3727 input.Connect(layer->GetInputSlot(0));
3728
3729 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3730}
3731
3732template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003733 typename HalOperation = typename HalPolicy::Operation,
3734 typename HalModel = typename HalPolicy::Model>
3735bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003736{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003737 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003738
3739 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3740 if (!input.IsValid())
3741 {
3742 return Fail("%s: Operation has invalid inputs", __func__);
3743 }
3744
3745 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3746 unsigned int rank = inputInfo.GetNumDimensions();
3747 if (rank > 4)
3748 {
3749 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3750 }
3751
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003752 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003753 if (!output)
3754 {
3755 return Fail("%s: Could not read output 0", __func__);
3756 }
3757
3758 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003759
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003760 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3761 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3762 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003763
3764 std::vector<int32_t> beginValues;
3765 std::vector<int32_t> endValues;
3766 std::vector<int32_t> stridesValues;
3767
3768 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003769 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003770 {
3771 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3772 {
3773 return false;
3774 }
3775
3776 if (operandValues.size() != rank)
3777 {
3778 return false;
3779 }
3780
3781 return true;
3782 };
3783
3784 if (!ValidateInputOperands(*beginOperand, beginValues)
3785 || !ValidateInputOperands(*endOperand, endValues)
3786 || !ValidateInputOperands(*stridesOperand, stridesValues))
3787 {
3788 return Fail("%s: Operation has invalid input operand", __func__);
3789 }
3790
3791 // Stride cannot have value '0'
3792 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3793 {
3794 return Fail("%s: Stride must be non-zero value.", __func__);
3795 }
3796
3797 armnn::StridedSliceDescriptor descriptor;
3798 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3799 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3800 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3801 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3802
3803 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3804 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3805 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3806 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3807 {
3808 return Fail("%s: Operation has invalid inputs", __func__);
3809 }
3810
3811 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003812 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3813 {
3814 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3815 IsStridedSliceSupported,
3816 data.m_Backends,
3817 isSupported,
3818 inputInfo,
3819 outputInfo,
3820 descriptor);
3821 };
3822
3823 if(IsDynamicTensor(outputInfo))
3824 {
3825 isSupported = AreDynamicTensorsSupported();
3826 }
3827 else
3828 {
3829 validateFunc(outputInfo, isSupported);
3830 }
3831
Mike Kelly46272802019-08-14 17:00:48 +01003832 if (!isSupported)
3833 {
3834 return false;
3835 }
3836
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003837 // Check if slice can fit in a inferred output
3838 armnn::TensorShape inputShape = inputInfo.GetShape();
3839 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3840 {
3841 int stride = descriptor.m_Stride[i];
3842 int start = descriptor.GetStartForAxis(inputShape, i);
3843 int stop = descriptor.GetStopForAxis(inputShape, i, start);
3844
3845 if (descriptor.m_ShrinkAxisMask & (1 << i))
3846 {
3847 // If the difference between the start point and the end point of the slice on an axis being shrunk
3848 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3849 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3850 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3851 {
3852 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3853 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003854
3855 if(stride < 0)
3856 {
3857 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3858 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003859 }
3860 }
3861
Mike Kelly46272802019-08-14 17:00:48 +01003862 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3863 assert(layer != nullptr);
3864 input.Connect(layer->GetInputSlot(0));
3865
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003866 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003867}
3868
3869template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003870 typename HalOperation = typename HalPolicy::Operation,
3871 typename HalModel = typename HalPolicy::Model>
3872bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003873{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003874 using HalOperand = typename HalPolicy::Operand;
Kevin May81f27fd2020-08-20 10:22:53 +01003875 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01003876
3877 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3878 if (!input.IsValid())
3879 {
3880 return Fail("%s: Operation has invalid inputs", __func__);
3881 }
3882
3883 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3884 unsigned int rank = inputInfo.GetNumDimensions();
3885 if (rank > 4)
3886 {
3887 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3888 }
3889
3890 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3891 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003892 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003893
3894 std::vector<int32_t> perm(rank);
Kevin May81f27fd2020-08-20 10:22:53 +01003895 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01003896 {
Mike Kelly46272802019-08-14 17:00:48 +01003897 for (unsigned int i = rank; i > 0; i--)
3898 {
3899 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3900 }
3901 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003902 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003903 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003904 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003905 }
3906
3907 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3908
Mike Kelly4a956582020-02-28 10:32:09 +00003909 armnn::TransposeDescriptor transposeDesc;
3910 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003911
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003912 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003913 if (!output)
3914 {
3915 return Fail("%s: Could not read output 0", __func__);
3916 }
3917
3918 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3919
3920 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003921 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3922 {
3923 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3924 IsTransposeSupported,
3925 data.m_Backends,
3926 isSupported,
3927 inputInfo,
3928 outputInfo,
3929 transposeDesc);
3930 };
3931
3932 if(IsDynamicTensor(outputInfo))
3933 {
3934 isSupported = AreDynamicTensorsSupported();
3935 }
3936 else
3937 {
3938 validateFunc(outputInfo, isSupported);
3939 }
3940
Mike Kelly46272802019-08-14 17:00:48 +01003941 if (!isSupported)
3942 {
3943 return false;
3944 }
3945
Mike Kelly4a956582020-02-28 10:32:09 +00003946 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003947 assert(layer != nullptr);
3948 input.Connect(layer->GetInputSlot(0));
3949
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003950 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003951}
3952
3953template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003954 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003955 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003956 typename HalModel = typename HalPolicy::Model>
3957bool ConvertBatchToSpaceNd(const HalOperation& operation,
3958 const HalModel& model,
3959 ConversionData& data)
3960{
Finn Williams23b87b32019-07-30 11:44:05 +01003961
3962 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3963 if (!input.IsValid())
3964 {
3965 return Fail("%s: Operation has invalid inputs", __func__);
3966 }
3967
3968 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3969 if (!output)
3970 {
3971 return Fail("%s: Could not read output 0", __func__);
3972 }
3973
3974 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01003975
3976 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3977 if (!blockOperand)
3978 {
3979 return Fail("%s: Could not read input 1", __func__);
3980 }
3981
3982 // Convert the block operand to int32
3983 std::vector<int32_t> block;
3984 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3985 {
3986 return Fail("%s: Input 1 has invalid values", __func__);
3987 }
3988
3989 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3990
3991 unsigned int rank = inputInfo.GetNumDimensions();
3992 if (rank != 4)
3993 {
3994 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3995 }
3996
3997 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3998 {
3999 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4000 " greater than or equal to 1", __func__);
4001 }
4002
4003 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4004 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4005 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4006
Kevin May42477c12020-03-26 13:34:14 +00004007 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004008 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004009 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004010 }
4011 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4012 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4013
4014 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004015 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4016 {
4017 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4018 IsBatchToSpaceNdSupported,
4019 data.m_Backends,
4020 isSupported,
4021 inputInfo,
4022 outputInfo,
4023 batchToSpaceNdDesc);
4024 };
4025
4026 if(!IsDynamicTensor(outputInfo))
4027 {
4028 validateFunc(outputInfo, isSupported);
4029 }
4030 else
4031 {
4032 isSupported = AreDynamicTensorsSupported();
4033 }
4034
4035
Finn Williams23b87b32019-07-30 11:44:05 +01004036 if (!isSupported)
4037 {
4038 return false;
4039 }
4040
4041 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
4042 assert(layer != nullptr);
4043 input.Connect(layer->GetInputSlot(0));
4044
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004045 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004046}
Mike Kelly0a879362019-07-29 16:56:31 +01004047
Finn Williamsd74c5052019-07-30 17:06:00 +01004048template<typename HalPolicy,
4049 typename HalOperation = typename HalPolicy::Operation,
4050 typename HalOperand = typename HalPolicy::Operand,
4051 typename HalModel = typename HalPolicy::Model>
4052bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4053{
4054 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4055 if (!input.IsValid())
4056 {
4057 return Fail("%s: Operation has invalid inputs", __func__);
4058 }
4059
4060 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4061 unsigned int rank = inputInfo.GetNumDimensions();
4062 unsigned int spatialDim = rank - 2;
4063
4064 if (rank != 4)
4065 {
4066 Fail("%s: Only inputs with rank 4 are supported", __func__);
4067 }
4068
4069 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4070 if (!output)
4071 {
4072 return Fail("%s: Could not read output 0", __func__);
4073 }
4074
4075 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004076
4077 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4078 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4079
4080 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4081 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4082 {
4083 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4084 }
4085
4086 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004087 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4088 {
4089 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4090 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004091 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4092 {
4093 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4094 }
4095
4096 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4097 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4098 {
4099 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4100 }
4101
4102 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4103 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004104 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4105 {
4106 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4107 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004108 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4109 {
4110 int paddingBeforeInput = paddings[i];
4111 int paddingAfterInput = paddings[i + 1];
4112 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4113 {
4114 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4115 }
4116
4117 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4118 }
4119
4120 armnn::SpaceToBatchNdDescriptor descriptor;
4121 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4122 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4123 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4124
Kevin May42477c12020-03-26 13:34:14 +00004125 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004126 {
4127 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4128 }
4129
4130 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004131 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4132 {
4133 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4134 IsSpaceToBatchNdSupported,
4135 data.m_Backends,
4136 isSupported,
4137 inputInfo,
4138 outputInfo,
4139 descriptor);
4140 };
4141
4142 if(IsDynamicTensor(outputInfo))
4143 {
4144 isSupported = AreDynamicTensorsSupported();
4145 }
4146 else
4147 {
4148 validateFunc(outputInfo, isSupported);
4149 }
4150
Finn Williamsd74c5052019-07-30 17:06:00 +01004151 if (!isSupported)
4152 {
4153 return false;
4154 }
4155
4156 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4157 assert(layer != nullptr);
4158 input.Connect(layer->GetInputSlot(0));
4159
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004160 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004161}
4162
saoste01b8471482018-10-10 09:44:51 +01004163} // namespace armnn_driver