//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>
namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
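
// Illustrative usage (a minimal sketch; "inputInfo", "outputInfo" and the descriptor are stand-ins,
// not defined here). This is the pattern the conversion functions below follow:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);
//     if (!isSupported)
//     {
//         return false;
//     }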

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
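
// Worked example (illustrative): broadcasting a 2-D bias of shape { 3, 4 } against a 4-D input of
// shape { 1, 2, 3, 4 } gives sizeDifference == 2, so the bias is reshaped to { 1, 1, 3, 4 } (leading
// degenerate dimensions) before both tensors are connected to startLayer; the element order is unchanged.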

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
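
// Worked example (a sketch assuming the usual NNAPI SAME-padding rule, where the output size is
// ceil(input / stride) and any odd padding goes to the tail): for input = 224, kernel = 3, stride = 2,
// the output size is 112, the needed input is (112 - 1) * 2 + 3 = 225, so the total padding is 1,
// giving outPadHead = 0 and outPadTail = 1.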

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
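
// Numeric example (illustrative): with an input scale of 0.5f and a per-tensor weight scale of 0.25f,
// the expected bias scale is 0.5f * 0.25f = 0.125f. A bias scale of 0.1251f falls within the 1%
// tolerance above and is silently corrected to 0.125f; a bias scale of 0.2f is left untouched and
// will be rejected by ArmNN later.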

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
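
// Shape example (illustrative): transposing a tensor of shape { 1, 2, 3, 4 } with SwapDim1And2
// ({ 0, 2, 1, 3 }) yields shape { 1, 3, 2, 4 }; dimensions 1 and 2 trade places, which is how a
// concatenation along dimension 2 is rewritten as one along dimension 1 further below.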

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
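
// Worked example (illustrative): concatenating shapes { 1, 2, 3, 4 } and { 1, 5, 3, 4 } along
// concatDim == 1 requires an output shape of { 1, 7, 3, 4 }: the concatenated dimension is the sum
// of the inputs' sizes (2 + 5) and every other dimension must match the first input exactly.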

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // Check the transpose layer is supported before swizzling the inputs
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}


bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
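
// Example (illustrative): for a 4-D concatenation along dimension 2 this returns needPermute == true,
// rewrites concatDimension to 1 and selects (SwapDim1And2, SwapDim1And2); the first vector swizzles
// the inputs before the concat and the second restores the original layout afterwards. For a 3-D
// concat along dimension 1 the inputs are rotated left ({ 1U, 2U, 0U }) and the result rotated
// right ({ 2U, 0U, 1U }).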

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires it.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function, or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // An optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // Mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // The Android data layout might differ from the ArmNN data layout, e.g. for the kernel of a depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}
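
// Illustrative usage (a sketch; treating input 1 as constant weights is an assumption about the
// operation being converted, not part of this helper's contract):
//
//     ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
//     if (!weightsPin.IsValid())
//     {
//         return Fail("%s: Operation has invalid weights", __func__);
//     }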

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        // A missing operand is only acceptable for optional inputs
        return optional ? true : Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %zu",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}
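
// Example (illustrative): an axis operand declared as a 4-element TENSOR_INT32 has
// operand.location.length == 16, so outValues ends up with 16 / sizeof(int32_t) == 4 entries.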

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
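
// Illustrative call pattern (a sketch of how converters built on these helpers typically use the
// handle; "layer" is a stand-in, not defined here):
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     input.Connect(layer->GetInputSlot(0));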
1270

#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const ::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif

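// A minimal sketch of how a converter typically uses ConvertToLayerInputHandle (the variable names
// are illustrative; the converters below all follow this pattern):
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     input.Connect(someLayer->GetInputSlot(0)); // 'someLayer' is a hypothetical ArmNN layer
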
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }

        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}

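// The validateFunc parameter lets a converter defer backend validation until a dynamic output shape
// has been inferred. A minimal sketch of the usual wiring (the lambda shape mirrors the converters
// below; 'input', 'desc' and 'layer' are placeholders):
//
//     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
//     {
//         FORWARD_LAYER_SUPPORT_FUNC(__func__, IsActivationSupported, data.m_Backends, isSupported,
//                                    input.GetTensorInfo(), outputInfo, desc);
//     };
//     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
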
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

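// The optional layout operand is a BOOL scalar: true selects NCHW, while false (or an absent or
// non-bool operand) falls back to NHWC. For example, ConvertPooling2d below reads it from input
// index 10 or 7 depending on how many inputs the operation has.
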
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

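// Additional activation converters follow the same pattern: populate an armnn::ActivationDescriptor
// and delegate to ConvertToActivation. A minimal sketch of a hypothetical sigmoid wrapper (not part
// of this driver; armnn::ActivationFunction::Sigmoid is an existing ArmNN enum value):
//
//     template<typename HalPolicy,
//              typename HalOperation = typename HalPolicy::Operation,
//              typename HalModel = typename HalPolicy::Model>
//     bool ConvertSigmoid(const HalOperation& operation, const HalModel& model, ConversionData& data)
//     {
//         armnn::ActivationDescriptor desc;
//         desc.m_Function = armnn::ActivationFunction::Sigmoid;
//         return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
//     }
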
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // add padding for each dimension of input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

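// The paddings operand is laid out as a [rank, 2] tensor of int32 values: row i holds
// {pad_before, pad_after} for dimension i. As an example with illustrative values only, for a
// rank-2 input, paddings = { {1, 2}, {0, 3} } pads one row before and two rows after dimension 0
// and three columns after dimension 1, so a [3, 4] input becomes a [6, 7] output.
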
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

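// Callers wrap ConvertPooling2d with the pooling algorithm baked in. A minimal sketch with a
// hypothetical wrapper name (the per-HAL converters in this driver have the same shape):
//
//     template<typename HalPolicy,
//              typename HalOperation = typename HalPolicy::Operation,
//              typename HalModel = typename HalPolicy::Model>
//     bool ConvertAveragePool2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
//     {
//         return ConvertPooling2d<HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
//     }
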
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

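// NOTE: BroadcastTensor handles inputs of different rank by inserting a Reshape layer that expands
// the lower-rank input with leading 1-dimensions (for example, adding a [4]-shaped tensor to a
// [2, 3, 4] tensor reshapes the former to [1, 1, 4]) so that ArmNN's elementwise broadcasting applies.
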
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
                      const HalModel& model,
                      ConversionData& data,
                      armnn::ArgMinMaxFunction argMinMaxFunction)
{
    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. a rank 4 tensor can have axis in range [-4, 4)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis     = axis;

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);

    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

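// Worked example of the axis bounds above: for a rank-4 tensor, axis may be any value in [-4, 4);
// a negative axis is interpreted as axis + rank, so axis = -1 addresses dimension 3.
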
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape     = outputInfo.GetShape();
    const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // this is a no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
                                   outputInfo, concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    ARMNN_ASSERT(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        if (!transposeOutputShape())
        {
            return false;
        }
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            if (!transposeOutputShape())
            {
                return false;
            }
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                       0,
                                                       *layer,
                                                       model,
                                                       data,
                                                       nullptr,
                                                       validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

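// NOTE: ArmNN's concat only supports concatenation along dimension 0, 1 or 3 of a 4-D tensor
// (0 or 2 for 3-D), which is why CreateConcatPermutationParameters may request a transpose: the
// inputs are swizzled so the requested axis lands on a supported one, concatenated, and then
// swizzled back by the deswizzle Transpose layer added in transposeOutputShape above.
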
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

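// For the implicit-padding (7-input) form above, CalcPadding derives the pad values from the
// Android padding scheme. As a worked example for SAME padding: with inputX = 224, kernelX = 3 and
// strideX = 2, the output extent is ceil(224 / 2) = 112, the total padding is
// (112 - 1) * 2 + 3 - 224 = 1, and it is split as padLeft = 0, padRight = 1 (the excess goes to the end).
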
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::DepthToSpaceDescriptor descriptor;

    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
    {
        return Fail("%s: Failed to read block size", __func__);
    }
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

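// DEPTH_TO_SPACE rearranges depth into spatial blocks: an NHWC input of shape [N, H, W, C] with
// block size b produces [N, H * b, W * b, C / (b * b)]. For example, [1, 2, 2, 4] with b = 2
// becomes [1, 4, 4, 1].
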
2524template<typename HalPolicy,
2525 typename HalOperation = typename HalPolicy::Operation,
2526 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002527bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2528{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002529 using HalOperand = typename HalPolicy::Operand;
2530 using HalOperandType = typename HalPolicy::OperandType;
2531
2532 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002533
2534 if (!input.IsValid())
2535 {
2536 return Fail("%s: Operation has invalid inputs", __func__);
2537 }
2538
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002539 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002540
2541 if (!output)
2542 {
2543 return Fail("%s: Could not read output 0", __func__);
2544 }
2545
2546 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002547 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002548
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002549 // ArmNN does not currently support non-fixed weights or bias
Mike Kellyb5fdf382019-06-11 16:35:25 +01002550 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002551 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002552
2553 if (weightsOperand == nullptr)
2554 {
2555 return Fail("%s: Operand is invalid", __func__);
2556 }
2557 armnn::DepthwiseConvolution2dDescriptor desc;
2558 desc.m_DataLayout = armnn::DataLayout::NHWC;
2559
Mike Kellyb5fdf382019-06-11 16:35:25 +01002560 // Reinterpret weight data as [ H, W, I, M ]
2561 armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
2562 weightsOperand->dimensions[2],
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002563 inputInfo.GetShape()[3],
2564 weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
Mike Kellyb5fdf382019-06-11 16:35:25 +01002565
2566 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
2567 const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
2568
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002569 const ConstTensorPin weightsPin =
2570 ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
2571 1,
2572 model,
2573 data,
2574 HWIMToMIHW,
2575 &weightsShape);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002576
2577 // Bias is a 1D tensor
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002578 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002579
2580 if (!weightsPin.IsValid() || !biasPin.IsValid())
2581 {
2582 return Fail("%s: Operation has invalid inputs", __func__);
2583 }
2584
2585 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2586 armnn::ConstTensor bias = biasPin.GetConstTensor();
2587 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
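
    // Sketch of the implicit-padding arithmetic CalcPadding is expected to perform for the
    // SAME scheme (formula from the AndroidNN padding definition; treat the exact
    // left/right split as an assumption of this example): for inputX = 10, kernelX = 3,
    // strideX = 2,
    //   outputX  = ceil(inputX / strideX)                            = 5
    //   totalPad = max((outputX - 1) * strideX + kernelX - inputX, 0) = 1
    //   padLeft  = totalPad / 2 = 0,  padRight = totalPad - padLeft   = 1
    // With the VALID scheme both pads are simply 0.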

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }
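
    // Background for the check above: a per-channel quantized tensor carries one scale per
    // slice along its quantization dimension, and each element dequantizes as
    //   real = scale[c] * (q - zeroPoint)
    // (zeroPoint is 0 for the symmetric per-channel types). Only a quantization dimension
    // of 0 is accepted here; per-tensor inputs carry no quantization dimension at all.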

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }
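
    // Sketch of what the broadcast step above handles, with illustrative shapes: for
    // input0 of shape [ 2, 3, 4 ] and input1 of shape [ 4 ], BroadcastTensor is expected
    // to insert a reshape so the lower-rank operand becomes [ 1, 1, 4 ] before it is
    // connected, letting the backend broadcast it element-wise across the other input.
    // (The exact reshape strategy belongs to BroadcastTensor, defined elsewhere in this
    // file; treat this description as an assumption.)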

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFloorSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const HalOperation& operation,
                                      const HalModel& model,
                                      const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant<HalPolicy>(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non-const tensor; this indicates they might be the output of a dequantize op.
    // Iterate over the nodes and find the previous operation, which should be DEQUANTIZE
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to weightsInputIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
        ARMNN_ASSERT(operand);

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        // QSYMM8 data is signed, so the raw bytes must be read as int8_t; the byte length
        // equals the element count because each QSYMM8 element occupies one byte.
        const int8_t* quantizedBuffer = reinterpret_cast<const int8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        ARMNN_ASSERT(dstPtr);
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            // Write each dequantized value to its own slot; re-fetching the buffer start
            // inside the loop would overwrite element 0 on every iteration.
            dstPtr[i] = quantizedBuffer[i] * quantizationScale;
        }
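
        // Worked example of the symmetric dequantization above (values are illustrative):
        // QSYMM8 has a zero point of 0, so with quantizationScale = 0.05f a stored value
        // of -40 becomes -40 * 0.05f = -2.0f, and 127 becomes 6.35f.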

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }
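
    // Shape sketch for the flatten above, using made-up dimensions: an input of
    // [ 1, 2, 2, 10 ] (40 elements) with weights of [ outputSize, inputSize ] = [ 8, 40 ]
    // is expected to collapse to the 2D shape [ batches, inputSize ] = [ 1, 40 ] that the
    // fully connected layer consumes.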

    // Ensure the bias quantization scale is within 1% of inputScale * weightsScale
    // (small float differences can exist), adjusting it if necessary
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weights.GetInfo().GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weights.GetInfo(),
                                   bias.GetInfo(),
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (inputInfo.GetNumDimensions() > 2U)
    {
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        assert(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsL2NormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
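
    // Example of the radius-to-window conversion above: an AndroidNN radius of 2 covers
    // 2 channels on either side of the current one, so the full ArmNN window size becomes
    // 1 + (2 * 2) = 5.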

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
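
    // Example of the axis normalisation above, with illustrative values: for a rank 4
    // input, an axis list of { -1, 3, 1 } maps to { (-1 + 4) % 4, (3 + 4) % 4, (1 + 4) % 4 }
    // = { 3, 3, 1 }, which the set collapses to the unique axes { 1, 3 }.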

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMultiplicationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPad(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
    // the scale and zeroPoint must be the same as input0.
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q
    // the pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends
    // up as (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }
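
    // Worked example of the pad-value rule above (illustrative numbers): a QAsymmU8 input
    // with scale = 0.25f and zeroPoint = 128 is padded with the raw value 128, which
    // dequantizes to (128 - 128) * 0.25f = 0.0f, i.e. a logical zero.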

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }
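
    // Example of the -1 resolution performed above (shapes are illustrative): for an input
    // of shape [ 2, 3, 4 ] (24 elements) and a requested shape of { -1, 6 }, the -1 is
    // resolved to 24 / 6 = 4, giving a fully specified target shape of [ 4, 6 ].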

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSubtractionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence,
                    dimensionSequence + rank);
    }
    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }
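
    // Squeeze example with illustrative shapes: an input of [ 1, 3, 1, 2 ] with no axis
    // operand considers every dimension, drops the two that equal 1, and produces
    // outputDims = [ 3, 2 ]; with an explicit axis list of { 0 } only the leading 1 is
    // removed, giving [ 3, 1, 2 ].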

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               reshapeDesc);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The lengths of the beginOperand, endOperand and stridesOperand must match the rank of the input
    auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check if the slice can fit in an inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];
        int start = descriptor.GetStartForAxis(inputShape, i);
        int stop = descriptor.GetStopForAxis(inputShape, i, start);

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then throw an error as the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }
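
    // Shrink-axis example with illustrative values: for begin = 2, end = 3, stride = 1 and
    // bit i set in shrink_axis_mask, the slice keeps exactly one element on axis i and the
    // axis is removed from the output shape; begin = 2, end = 5 would need three elements
    // on a removed axis, which is what the |begin - end| > 1 check above rejects.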

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
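        // With no permutation operand the dimension order is reversed; e.g. for a
        // rank-4 input the default perm below works out to { 3, 2, 1, 0 }.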
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
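
// Illustrative sketch only (hypothetical helper, not used by the driver): builds
// the TransposeDescriptor for an NHWC -> NCHW layout change. The conversion above
// forwards the Android NN perm unchanged, so the TensorFlow-style perm
// { 0, 3, 1, 2 } is what a client would supply for this rearrangement.
inline armnn::TransposeDescriptor ExampleNhwcToNchwTranspose()
{
    const unsigned int perm[] = { 0, 3, 1, 2 };
    armnn::TransposeDescriptor desc;
    desc.m_DimMappings = armnn::PermutationVector(perm, 4);
    return desc;
}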

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }
    // Set crops to {{0, 0}, {0, 0}}, as crops are not supported in the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
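
    // Shape example: an NHWC input of [4, 1, 1, 1] with block shape [2, 2] yields
    // an output of [1, 2, 2, 1], i.e. [N / (blockH * blockW), H * blockH, W * blockW, C].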

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
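
// Illustrative helper (hypothetical, not part of the driver): computes the
// expected BATCH_TO_SPACE_ND output shape for an NHWC input with zero crops,
// matching the descriptor set up above.
inline armnn::TensorShape ExampleBatchToSpaceOutputShape(const armnn::TensorShape& input,
                                                         unsigned int blockH,
                                                         unsigned int blockW)
{
    // input is [N, H, W, C]; the batch is redistributed into the spatial dims
    return armnn::TensorShape({ input[0] / (blockH * blockW),
                                input[1] * blockH,
                                input[2] * blockW,
                                input[3] });
}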

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    unsigned int spatialDim = rank - 2;

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!blockShapeOperand || !paddingsOperand)
    {
        return Fail("%s: Could not read input 1 or input 2", __func__);
    }

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }
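    // The paddings tensor is flattened row-major, so the pair (paddings[2*i],
    // paddings[2*i + 1]) gives the before/after padding for spatial dimension i
    // (height then width for an NHWC input).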
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
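
// Illustrative helper (hypothetical, not part of the driver): computes the
// expected SPACE_TO_BATCH_ND output shape for an NHWC input, mirroring the block
// shape and padding list assembled above. It is the inverse of the
// BATCH_TO_SPACE_ND shape rule; the padded spatial dims must divide evenly by
// the block sizes.
inline armnn::TensorShape ExampleSpaceToBatchOutputShape(const armnn::TensorShape& input,
                                                         unsigned int blockH,
                                                         unsigned int blockW,
                                                         const std::pair<unsigned int, unsigned int>& padH,
                                                         const std::pair<unsigned int, unsigned int>& padW)
{
    // input is [N, H, W, C]; spatial blocks are folded into the batch dimension
    return armnn::TensorShape({ input[0] * blockH * blockW,
                                (input[1] + padH.first + padH.second) / blockH,
                                (input[2] + padW.first + padW.second) / blockW,
                                input[3] });
}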

} // namespace armnn_driver