blob: ca5c99ec775442c6779416098c859521dfbfe7b0 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010012#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000013#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan9b088d92020-09-14 15:12:55 +010014#include <armnn/utility/NumericCast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
James Ward4e22f602020-10-20 15:50:33 +010025#include <armnnUtils/FloatingPointComparison.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026
27#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010028#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010029
30namespace armnn_driver
31{
32
33///
34/// Helper classes
35///
36
Kevin Mayec1e5b82020-02-26 17:00:39 +000037#ifdef ARMNN_ANDROID_R
38using OperandType = android::nn::hal::OperandType;
39#endif
40
Sadik Armagan188675f2021-02-12 17:16:42 +000041#ifdef ARMNN_ANDROID_S
42#include <nnapi/Types.h>
43#endif
44
45
// Mutable state threaded through the conversion of one NN model:
// the candidate backends, the ArmNN network being built, and bookkeeping
// that maps model operands to the network's output slots.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    // Backends to query for layer support, in priority order.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction (starts out null; created by the caller).
    armnn::INetworkPtr m_Network;
    // Output slot producing each operand, indexed by operand index.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operands; must outlive the network.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    // Set when any input with a dynamic (unspecified) shape has been seen.
    bool m_DynamicInputsEncountered;
};
60
// Lightweight handle pairing an ArmNN output slot with its tensor info,
// used to (dis)connect a converted operand to a downstream layer's input.
// An invalid handle (default-constructed or valid==false) signals a
// conversion failure for that input.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Disconnects the wrapped output slot from the given input slot.
    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot; // non-owning; may be null for an invalid handle
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
80
// Pins a constant tensor read from the model, optionally holding a swizzled
// (permuted) copy of the data when the source layout needed rearranging.
// Move-only: the pin may own the swizzled buffer referenced by m_ConstTensor.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation to apply to the data; identity means no copy is made.
    // NOTE(review): tensorInfo is taken by non-const reference — presumably so the
    // constructor can adjust it (e.g. mark it constant); confirm before const-ifying.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
115
116} // namespace armnn_driver
117
118///
119/// Utility functions
120///
121
122namespace
123{
124
125using namespace armnn_driver;
126using namespace android::nn;
127
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string forwarded verbatim to ALOGD;
//                  callers must supply arguments matching its specifiers.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
136
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order and sets 'supported' to true for the first
// registered backend whose layer-support object accepts the query; unsupported
// backends and the failure reason are logged via ALOGD. An
// InvalidArgumentException thrown by the support check is rethrown with added
// context. NOTE: 'supported' is written, not just read — pass a bool lvalue.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100180
// Builds an ArmNN TensorShape from a HAL operand's 'dimensions' vector
// (works for any HAL version whose operand exposes that field).
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
186
Matthew Bentham912b3622019-05-03 15:49:14 +0100187inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100188{
Matthew Bentham912b3622019-05-03 15:49:14 +0100189 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
190 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
191 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100192}
193
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL                           ||
           type == V1_2::OperandType::TENSOR_BOOL8                   ||
           type == V1_2::OperandType::TENSOR_FLOAT16                 ||
           type == V1_2::OperandType::TENSOR_FLOAT32                 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
// (adds TENSOR_QUANT8_ASYMM_SIGNED on top of the 1.2 set)
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL                           ||
           type == V1_3::OperandType::TENSOR_BOOL8                   ||
           type == V1_3::OperandType::TENSOR_FLOAT16                 ||
           type == V1_3::OperandType::TENSOR_FLOAT32                 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED     ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif
230
// Overload set answering two questions about a HAL operand regardless of the
// HAL version it comes from:
//   IsBool            - is the operand of scalar BOOL type? (BOOL only exists from 1.2)
//   Is12OrLaterOperand - does the operand belong to HAL 1.2 or newer?
// The V1_0 overloads are the fallbacks and always answer false.

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
270
// Inserts a Reshape layer after 'inputLayer' producing a tensor of
// 'reshapeInfo', and returns the new layer. LayerHandleType only needs a
// Connect(armnn::IInputSlot&) member (e.g. LayerInputHandle or an output slot).
template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}
288
// Connects input0 and input1 to the first two input slots of 'startLayer'.
// When the inputs have different ranks, the lower-rank input is first reshaped
// by prepending size-1 dimensions so that elementwise broadcast semantics are
// preserved; the input order is kept.
// @return false if the required Reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Left-pad the smaller shape with 1s: [d0..dk] -> [1,..,1,d0..dk]
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
381
// Computes explicit head/tail padding for the given input extent, kernel size
// and stride from an Android NN padding scheme (SAME/VALID), delegating to the
// NN runtime's calculateExplicitPadding.
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // calculateExplicitPadding yields non-negative values, so the casts are safe
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
395
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// As CalcPadding above, but for HAL 1.2+ convolutions that also carry a
// dilation factor (forwarded to the dilation-aware runtime overload).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

// Explicit padding for transpose convolution: derived from the desired output
// extent rather than the input extent, and may legitimately be negative.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
415
// Converts a HAL operand into the NN runtime's Shape struct (type, dimensions
// and quantization parameters), one overload per supported HAL version.
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
453
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
// @param biasInfo   bias TensorInfo whose quantization params are fixed up in place
// @param weightInfo weight TensorInfo (per-tensor or per-axis quantized)
// @param inputInfo  input TensorInfo supplying the input scale
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Tolerate up to 1% mismatch (seen in some AndroidNN tests) and silently correct it
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
493
// 4D tensor permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D tensor permutations
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100502
// Inserts a Transpose layer applying 'mappings' to the tensor produced by
// 'input', sets the transposed TensorInfo on its output slot, and returns the
// new layer. OSlot needs Connect(IInputSlot&) and GetTensorInfo().
template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
521
arovir01b0717b52018-09-05 17:03:25 +0100522bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
523 const armnn::TensorShape & outputShape,
524 uint32_t concatDim)
525{
526 // Validate the output shape is correct given the input shapes (which have just been validated)
527 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
528 if (outputShape.GetNumDimensions() != numDimensions)
529 {
530 return Fail("%s: Output shape has wrong number of dimensions", __func__);
531 }
532
533 unsigned int outputSizeAlongConcatenatedDimension = 0;
534 for (unsigned int i = 0; i < inputShapes.size(); i++)
535 {
536 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
537 }
538
539 for (unsigned int i = 0; i < numDimensions; ++i)
540 {
541 if (i == concatDim)
542 {
543 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
544 {
545 return Fail(
546 "%s: Invalid output shape for dimension %d (%d != %d)",
547 __func__,
548 i,
549 outputShape[i],
550 outputSizeAlongConcatenatedDimension);
551 }
552 }
553 else
554 {
555 if (outputShape[i] != inputShapes[0][i])
556 {
557 return Fail("%s: Invalid output shape", __func__);
558 }
559 }
560 }
561
562 return true;
563}
564
565bool RequiresReshape(armnn::TensorShape & inputShape)
566{
567 return inputShape.GetNumDimensions() < 3;
568}
569
// Inserts a Transpose layer in front of every input handle applying 'mapping',
// replacing each entry of 'inputs' with the transposed output and updating
// 'inputShapes' to match. A 4D identity mapping is a no-op.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
590
// Checks that the Transpose required by 'mapping' is supported by a backend for
// every input, and if so swizzles all inputs via SwizzleInputs. Identity
// mappings (3D or 4D) are no-ops and always succeed.
// @return false if any input's transpose is unsupported (no layers are added
//         to the network by the failed support checks themselves).
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
626
627
// Works out whether concatenation along 'concatDimension' needs the inputs to
// be permuted (because Compute Library subtensors cannot concatenate along
// that axis), and if so rewrites 'concatDimension' and fills 'permutationPair'
// with the forward/inverse permutations to apply before/after the concat.
// @return true if a (non-identity) permute of the inputs is required.
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
658
659} // anonymous namespace
660
661namespace armnn_driver
662{
663
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
674
675} // namespace armnn_driver
676
677///
678/// Utility templates
679///
680
681namespace armnn_driver
682{
683
684using namespace android::nn;
685
// Looks up the operand backing input #inputIndex of 'operation' in the model's
// main subgraph.
// @param failOnIndexOutOfBounds when true, an out-of-range index is logged as
//        a conversion failure; when false it is silently tolerated.
// @return pointer to the operand, or nullptr if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match uint32_t and size_t; the previous "%i" specifiers were
            // mismatched for both arguments (undefined behaviour for size_t on LP64)
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
708
// Looks up the operand backing output #outputIndex of 'operation' in the
// model's main subgraph.
// @return pointer to the operand, or nullptr (after logging) if the index is
//         out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match uint32_t and size_t; the previous "%i" specifiers were
        // mismatched for both arguments (undefined behaviour for size_t on LP64)
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
728
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
// Resolves the read-only address of an operand's constant payload.
// Returns nullptr when the operand has no readable value: either an absent optional
// input (lifetime NO_VALUE with optional == true, not an error) or an unsupported
// lifetime (logged as a failure).
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object; resolved against the request's memory pools.
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with NO_VALUE is treated like any other invalid lifetime.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
775
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100776template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100777 typename HalOperation = typename HalPolicy::Operation,
778 typename HalModel = typename HalPolicy::Model,
779 typename HalOperandType = typename HalPolicy::OperandType>
780bool GetOperandType(const HalOperation& operation,
781 uint32_t inputIndex,
782 const HalModel& model,
783 HalOperandType& type)
784{
785 using HalOperand = typename HalPolicy::Operand;
786
787 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
788 if (!operand)
789 {
790 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
791 }
792
793 type = operand->type;
794 return true;
795}
796
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
// True when the operand's value is fixed at model-build time.
// NO_VALUE is included: an omitted optional operand behaves like a (missing) constant.
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
809
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
// Wraps a constant operand's payload in a ConstTensorPin.
// - dimensionMappings: optional permutation applied to the tensor's dimensions (e.g. weight layout swaps).
// - overrideTensorShape: replaces the operand's own shape when the caller knows better.
// - optional: an absent optional operand yields a pin that is invalid but flagged as optional.
// Any other failure (bad type, non-constant lifetime, unreadable payload) yields an invalid pin.
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Mandatory operands must be constant; optional ones may also be NO_VALUE.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
856
857template<typename HalPolicy,
858 typename HalOperation = typename HalPolicy::Operation,
859 typename HalModel = typename HalPolicy::Model>
860ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
861 uint32_t inputIndex,
862 const HalModel& model,
863 const ConversionData& data,
864 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
865 const armnn::TensorShape* overrideTensorShape = nullptr,
866 bool optional = false)
867{
868 using HalOperand = typename HalPolicy::Operand;
869
870 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
871 if (!operand)
872 {
873 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
874 return ConstTensorPin();
875 }
876 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
877 model,
878 data,
879 dimensionMappings,
880 overrideTensorShape,
881 optional);
882}
883
884template<typename HalPolicy,
885 typename OutputType,
886 typename HalOperandType = typename HalPolicy::OperandType,
887 typename HalOperation = typename HalPolicy::Operation,
888 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100889bool GetInputScalar(const HalOperation& operation,
890 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100891 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100892 OutputType& outValue,
893 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100894 const ConversionData& data,
895 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100896{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100897 using HalOperand = typename HalPolicy::Operand;
898
899 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100900 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100901 {
902 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
903 }
904
Sadik Armagan813f2302020-05-19 14:10:30 +0100905 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100906 {
907 return Fail("%s: unexpected operand type: %s (should be %s)",
908 __func__, toString(operand->type).c_str(), toString(type).c_str());
909 }
910
Sadik Armagan813f2302020-05-19 14:10:30 +0100911 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100912 {
913 return Fail("%s: incorrect operand location length: %i (should be %i)",
914 __func__, operand->location.length, sizeof(OutputType));
915 }
916
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100917 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100918 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100919 {
920 return Fail("%s: failed to get address for operand", __func__);
921 }
922
Sadik Armagan813f2302020-05-19 14:10:30 +0100923 if(!optional)
924 {
925 outValue = *(static_cast<const OutputType*>(valueAddress));
926 }
927
arovir01b0717b52018-09-05 17:03:25 +0100928 return true;
929}
930
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100931template<typename HalPolicy,
932 typename HalOperation = typename HalPolicy::Operation,
933 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100934bool GetInputInt32(const HalOperation& operation,
935 uint32_t inputIndex,
936 int32_t& outValue,
937 const HalModel& model,
938 const ConversionData& data)
939{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100940 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100941}
942
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100943template<typename HalPolicy,
944 typename HalOperation = typename HalPolicy::Operation,
945 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100946bool GetInputFloat32(const HalOperation& operation,
947 uint32_t inputIndex,
948 float& outValue,
949 const HalModel& model,
950 const ConversionData& data)
951{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100952 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100953}
954
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100955template<typename HalPolicy,
956 typename HalOperation = typename HalPolicy::Operation,
957 typename HalOperandType = typename HalPolicy::OperandType,
958 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100959bool GetInputActivationFunctionImpl(const HalOperation& operation,
960 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100961 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100962 ActivationFn& outActivationFunction,
963 const HalModel& model,
964 const ConversionData& data)
965{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100966 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100967 {
968 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
969 __func__,
970 toString(type).c_str(),
Sadik Armagan188675f2021-02-12 17:16:42 +0000971 toString(HalOperandType::INT32).c_str(),
972 toString(HalOperandType::TENSOR_INT32).c_str());
arovir01b0717b52018-09-05 17:03:25 +0100973 }
974
975 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100976 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100977 {
978 return Fail("%s: failed to get activation input value", __func__);
979 }
980 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
981 return true;
982}
983
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100984template<typename HalPolicy,
985 typename HalOperation = typename HalPolicy::Operation,
986 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100987bool GetInputActivationFunction(const HalOperation& operation,
988 uint32_t inputIndex,
989 ActivationFn& outActivationFunction,
990 const HalModel& model,
991 const ConversionData& data)
992{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100993 return GetInputActivationFunctionImpl<HalPolicy>(operation,
994 inputIndex,
995 HalPolicy::OperandType::INT32,
996 outActivationFunction,
997 model,
998 data);
arovir01b0717b52018-09-05 17:03:25 +0100999}
1000
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Reads an activation function that callers provide wrapped in a tensor operand.
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the comment, the type forwarded here is the scalar
    // OperandType::INT32, not TENSOR_INT32 (which the impl also accepts) - confirm
    // against the callers' operand encoding before changing it.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
1018
1019
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001020template<typename HalPolicy,
1021 typename HalOperation = typename HalPolicy::Operation,
1022 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001023bool GetOptionalInputActivation(const HalOperation& operation,
1024 uint32_t inputIndex,
1025 ActivationFn& activationFunction,
1026 const HalModel& model,
1027 const ConversionData& data)
1028{
1029 if (operation.inputs.size() <= inputIndex)
1030 {
1031 activationFunction = ActivationFn::kActivationNone;
1032 }
1033 else
1034 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001035 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001036 {
1037 return Fail("%s: Operation has invalid inputs", __func__);
1038 }
1039 }
1040 return true;
1041}
1042
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001043template<typename HalPolicy,
1044 typename ConvolutionDescriptor,
1045 typename HalOperation = typename HalPolicy::Operation,
1046 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001047bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1048 uint32_t dilationXIndex,
1049 ConvolutionDescriptor& descriptor,
1050 const HalModel& model,
1051 const ConversionData& data)
1052{
1053 bool success = true;
1054 if (operation.inputs.size() >= dilationXIndex + 2)
1055 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001056 success &= GetInputScalar<HalPolicy>(operation,
1057 dilationXIndex,
1058 HalPolicy::OperandType::INT32,
1059 descriptor.m_DilationX,
1060 model,
1061 data);
1062 success &= GetInputScalar<HalPolicy>(operation,
1063 dilationXIndex + 1,
1064 HalPolicy::OperandType::INT32,
1065 descriptor.m_DilationY,
1066 model,
1067 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001068 }
1069
1070 return success;
1071}
1072
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001073template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001074 typename HalOperation = typename HalPolicy::Operation,
1075 typename HalModel = typename HalPolicy::Model>
1076bool GetOptionalBool(const HalOperation& operation,
1077 uint32_t inputIndex,
1078 const HalModel& model,
1079 const ConversionData& data)
1080{
1081 using HalOperand = typename HalPolicy::Operand;
1082
1083 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1084 if (!operand)
1085 {
1086 return false;
1087 }
1088
1089 if (!IsBool(*operand))
1090 {
1091 return false;
1092 }
1093
1094 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1095 if (!valueAddress)
1096 {
1097 return false;
1098 }
1099
1100 if (*(static_cast<const bool*>(valueAddress)))
1101 {
1102 return true;
1103 }
1104 else
1105 {
1106 return false;
1107 }
1108}
1109
1110template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001111 typename HalOperand = typename HalPolicy::Operand,
1112 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001113bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001114 std::vector<int32_t>& outValues,
1115 const HalModel& model,
1116 const ConversionData& data)
1117{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001118 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001119 {
1120 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1121 }
1122
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001123 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001124 if (!startAddress)
1125 {
1126 return Fail("%s: failed to get operand address", __func__, operand.type);
1127 }
1128
1129 // Check number of bytes is sensible
1130 const uint32_t numBytes = operand.location.length;
1131 if (numBytes % sizeof(int32_t) != 0)
1132 {
1133 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1134 __func__, numBytes, sizeof(int32_t));
1135 }
1136
1137 outValues.resize(numBytes / sizeof(int32_t));
1138 memcpy(outValues.data(), startAddress, numBytes);
1139 return true;
1140}
1141
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001142template<typename HalPolicy,
1143 typename HalOperation = typename HalPolicy::Operation,
1144 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001145bool GetInputPaddingScheme(const HalOperation& operation,
1146 uint32_t inputIndex,
1147 PaddingScheme& outPaddingScheme,
1148 const HalModel& model,
1149 const ConversionData& data)
1150{
1151 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001152 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001153 {
1154 return Fail("%s: failed to get padding scheme input value", __func__);
1155 }
1156
1157 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1158 return true;
1159}
1160
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves the operation's inputIndex-th operand into a LayerInputHandle:
// - model inputs / temporaries / model outputs map onto an existing ArmNN output slot;
// - constants become a new ArmNN Constant layer (after a backend support check);
// - anything else (dynamic shapes, unsupported types/lifetimes) yields an invalid handle.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand throws UnsupportedOperand for HAL types ArmNN cannot represent.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant itself must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1274
Kevin May42477c12020-03-26 13:34:14 +00001275
#ifdef ARMNN_ANDROID_NN_V1_3
// HAL 1.3 overload of ConvertToLayerInputHandle. Unlike the generic template above it
// tolerates "type 1" dynamic input tensors, provided a previously-converted layer has
// already inferred their shape (recorded in data.m_OutputSlotForOperand).
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand throws UnsupportedOperand for HAL types ArmNN cannot represent.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them.
            // Fix: the index must be strictly less than the vector size before indexing;
            // the previous '>=' comparison allowed an out-of-bounds access when
            // operandIndex == m_OutputSlotForOperand.size().
            if (operandIndex < data.m_OutputSlotForOperand.size() && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant itself must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1403
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Binds one of `layer`'s output slots to the operation's operationOutputIndex-th output
// operand and records the slot in data.m_OutputSlotForOperand for later inputs.
// - overrideOutputInfo: replaces the operand-derived TensorInfo on the slot when given.
// - validateFunc: support-check callback used for dynamic outputs (or when
//   inferOutputShapes is set); on rejection the layer's inputs are disconnected.
// - activationFunction: when not kActivationNone, a fused activation layer is appended
//   and its output slot is recorded instead of `layer`'s.
// Returns false when the operand/slot cannot be resolved or validation fails.
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    // NOTE(review): the bounds guard below tests operationOutputIndex against the layer's
    // slot count, but layerOutputIndex is what is actually used to index GetOutputSlot -
    // confirm callers always keep the two in range together.
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if(!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        // Unsupported output: undo the layer's wiring so the graph stays consistent.
        if(!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        // Append the fused activation and publish ITS output slot for downstream consumers.
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
1485
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001486template<typename HalPolicy,
1487 typename HalOperation = typename HalPolicy::Operation,
1488 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001489armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1490 uint32_t inputIndex,
1491 const HalModel& model,
1492 ConversionData& data)
1493{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001494 using HalOperand = typename HalPolicy::Operand;
1495
1496 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001497 if (!operand)
1498 {
1499 return armnn::DataLayout::NHWC;
1500 }
1501
1502 if (!IsBool(*operand))
1503 {
1504 return armnn::DataLayout::NHWC;
1505 }
1506
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001507 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001508 if (!valueAddress)
1509 {
1510 return armnn::DataLayout::NHWC;
1511 }
1512
1513 if (*(static_cast<const bool*>(valueAddress)))
1514 {
1515 return armnn::DataLayout::NCHW;
1516 }
1517 else
1518 {
1519 return armnn::DataLayout::NHWC;
1520 }
1521}
1522
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001523template<typename HalPolicy,
1524 typename HalOperation = typename HalPolicy::Operation,
1525 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001526bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1527 uint32_t outputIndex,
1528 armnn::IConnectableLayer& layer,
1529 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001530 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001531 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001532 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1533 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001534{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001535 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1536 outputIndex,
1537 layer,
1538 outputIndex,
1539 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001540 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001541 overrideOutputInfo,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001542 validateFunc,
1543 activationFunction);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001544}
1545
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001546template<typename HalPolicy,
1547 typename HalOperation = typename HalPolicy::Operation,
1548 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001549bool ConvertToActivation(const HalOperation& operation,
1550 const char* operationName,
1551 const armnn::ActivationDescriptor& activationDesc,
1552 const HalModel& model,
1553 ConversionData& data)
1554{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001555 using HalOperand = typename HalPolicy::Operand;
1556
1557 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001558 if (!input.IsValid())
1559 {
1560 return Fail("%s: Input 0 is invalid", operationName);
1561 }
1562
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001563 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001564 if (!outputOperand)
1565 {
1566 return false;
1567 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001568
1569 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001570
1571 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001572
1573 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1574 {
1575 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1576 IsActivationSupported,
1577 data.m_Backends,
1578 isSupported,
1579 input.GetTensorInfo(),
1580 outInfo,
1581 activationDesc);
1582 };
1583
1584 if(IsDynamicTensor(outInfo))
1585 {
1586 isSupported = AreDynamicTensorsSupported();
1587 }
1588 else
1589 {
1590 validateFunc(outInfo, isSupported);
1591 }
1592
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001593 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001594 {
1595 return false;
1596 }
1597
1598 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01001599 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001600 input.Connect(layer->GetInputSlot(0));
1601
Finn Williamsa4983ce2020-07-23 12:55:12 +01001602 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001603}
1604
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001605template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001606 typename HalOperation = typename HalPolicy::Operation,
1607 typename HalModel = typename HalPolicy::Model>
1608bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1609{
1610 armnn::ActivationDescriptor desc;
1611 desc.m_Function = armnn::ActivationFunction::ReLu;
1612
1613 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1614}
1615
1616template<typename HalPolicy,
1617 typename HalOperation = typename HalPolicy::Operation,
1618 typename HalModel = typename HalPolicy::Model>
1619bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1620{
1621 armnn::ActivationDescriptor desc;
1622 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1623 desc.m_A = 1.0f;
1624 desc.m_B = -1.0f;
1625
1626 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1627}
1628
1629template<typename HalPolicy,
1630 typename HalOperation = typename HalPolicy::Operation,
1631 typename HalModel = typename HalPolicy::Model>
1632bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1633{
1634 armnn::ActivationDescriptor desc;
1635 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1636 desc.m_A = 6.0f;
1637
1638 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1639}
1640
1641template<typename HalPolicy,
1642 typename HalOperation = typename HalPolicy::Operation,
1643 typename HalModel = typename HalPolicy::Model>
1644bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1645{
1646 armnn::ActivationDescriptor desc;
1647 desc.m_Function = armnn::ActivationFunction::TanH;
1648 desc.m_A = 1.0f; // android nn does not support tanH parameters
1649 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1650
1651 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1652}
1653
1654template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001655 typename HalOperation = typename HalPolicy::Operation,
1656 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001657bool ConvertPaddings(const HalOperation& operation,
1658 const HalModel& model,
1659 ConversionData& data,
1660 unsigned int rank,
1661 armnn::PadDescriptor& padDescriptor)
1662{
1663 using HalOperand = typename HalPolicy::Operand;
1664
1665 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1666 if (!paddingsOperand)
1667 {
1668 return Fail("%s: Could not read paddings operand", __func__);
1669 }
1670
1671 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1672 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1673 {
1674 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1675 }
1676
1677 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001678 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1679 {
1680 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1681 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001682
1683 // add padding for each dimension of input tensor.
1684 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1685 {
1686 int paddingBeforeInput = paddings[i];
1687 int paddingAfterInput = paddings[i + 1];
1688
1689 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1690 {
1691 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1692 }
1693
1694 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1695 }
1696
1697 return true;
1698}
1699
1700template<typename HalPolicy,
1701 typename HalOperation = typename HalPolicy::Operation,
1702 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001703bool ConvertPooling2d(const HalOperation& operation,
1704 const char* operationName,
1705 armnn::PoolingAlgorithm poolType,
1706 const HalModel& model,
1707 ConversionData& data)
1708{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001709 using HalOperand = typename HalPolicy::Operand;
1710 using HalOperandType = typename HalPolicy::OperandType;
1711
1712 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001713 if (!input.IsValid())
1714 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001715 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001716 }
1717
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001718 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001719 if (!output)
1720 {
1721 return Fail("%s: Could not read output 0", __func__);
1722 }
1723
1724 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1725 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1726
arovir01b0717b52018-09-05 17:03:25 +01001727 armnn::Pooling2dDescriptor desc;
1728 desc.m_PoolType = poolType;
1729 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001730 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001731
1732 ActivationFn activation;
1733
Sadik Armagan15d63e22019-07-26 16:59:35 +01001734 auto inputSize = operation.inputs.size();
1735
1736 if (inputSize >= 10)
1737 {
1738 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1739 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1740 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1741 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1742 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1743 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1744 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1745 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1746 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1747 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1748 {
1749 return Fail("%s: Operation has invalid inputs", operationName);
1750 }
1751
Kevin May42477c12020-03-26 13:34:14 +00001752 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001753 {
1754 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1755 }
1756 }
1757 else
arovir01b0717b52018-09-05 17:03:25 +01001758 {
1759 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1760 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001761 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1762 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1763 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1764 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1765 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1766 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001767 {
1768 return Fail("%s: Operation has invalid inputs", operationName);
1769 }
1770
Kevin May42477c12020-03-26 13:34:14 +00001771 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001772 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001773 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001774 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001775
1776 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1777 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1778 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1779
1780 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1781 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001782 }
1783
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001784 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001785
1786 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1787 {
1788 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1789 IsPooling2dSupported,
1790 data.m_Backends,
1791 isSupported,
1792 inputInfo,
1793 outputInfo,
1794 desc);
1795
1796 };
1797
1798 if(IsDynamicTensor(outputInfo))
1799 {
1800 isSupported = AreDynamicTensorsSupported();
1801 }
1802 else
1803 {
1804 validateFunc(outputInfo, isSupported);
1805 }
1806
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001807 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001808 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001809 return false;
arovir01b0717b52018-09-05 17:03:25 +01001810 }
arovir01b0717b52018-09-05 17:03:25 +01001811
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001812 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1813 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001814 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001815 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001816 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001817
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001818 input.Connect(pooling2dLayer->GetInputSlot(0));
1819
Finn Williamsa4983ce2020-07-23 12:55:12 +01001820 if (!isSupported)
1821 {
1822 return false;
1823 }
1824
Kevin Mayfcf2a152020-09-08 16:06:32 +01001825 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1826 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001827}
1828
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001829template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001830 typename HalOperation = typename HalPolicy::Operation,
1831 typename HalModel = typename HalPolicy::Model>
1832bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001833{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001834 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001835
1836 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1837 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1838
1839 if (!input0.IsValid() || !input1.IsValid())
1840 {
1841 return Fail("%s: Operation has invalid inputs", __func__);
1842 }
1843
1844 // The FuseActivation parameter is always the input index 2
1845 // and it should be optional
1846 ActivationFn activationFunction;
1847 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1848 {
1849 return Fail("%s: Operation has invalid inputs", __func__);
1850 }
1851
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001852 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001853 if (!outputOperand)
1854 {
1855 return false;
1856 }
1857
1858 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1859 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1860
1861 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001862
1863 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001864 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1865 {
1866 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1867 IsAdditionSupported,
1868 data.m_Backends,
1869 isSupported,
1870 inputInfo0,
1871 inputInfo1,
1872 outputInfo);
1873 };
1874
1875 if(!IsDynamicTensor(outputInfo))
1876 {
1877 validateFunc(outputInfo, isSupported);
1878 }
1879 else
1880 {
1881 isSupported = AreDynamicTensorsSupported();
1882 }
1883
Mike Kelly46272802019-08-14 17:00:48 +01001884 if (!isSupported)
1885 {
1886 return false;
1887 }
1888
1889 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01001890
Kevin Mayfcf2a152020-09-08 16:06:32 +01001891 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1892 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01001893 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01001894 return false;
1895 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01001896
Kevin Mayfcf2a152020-09-08 16:06:32 +01001897 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
1898 data, nullptr, validateFunc, activationFunction);
1899
Mike Kelly46272802019-08-14 17:00:48 +01001900}
1901
1902template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001903 typename HalOperation = typename HalPolicy::Operation,
1904 typename HalModel = typename HalPolicy::Model>
1905bool ConvertArgMinMax(const HalOperation& operation,
1906 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001907 ConversionData& data,
1908 armnn::ArgMinMaxFunction argMinMaxFunction)
1909{
1910 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1911
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001912 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001913 using HalOperandType = typename HalPolicy::OperandType;
1914
1915 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1916
1917 if (!input0.IsValid())
1918 {
1919 return Fail("%s: Operation has invalid inputs", __func__);
1920 }
1921
1922 int32_t axis;
1923 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1924 {
1925 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1926 }
1927
1928 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1929 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1930
1931 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1932 {
1933 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1934 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1935 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1936 return Fail("%s: Axis must be in range [-n, n)", __func__);
1937 }
1938
1939 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1940 if (!output)
1941 {
1942 return Fail("%s: Could not read output 0", __func__);
1943 }
1944
1945 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1946
1947 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001948
1949 armnn::ArgMinMaxDescriptor descriptor;
1950 descriptor.m_Function = argMinMaxFunction;
1951 descriptor.m_Axis = axis;
1952
1953 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001954
1955 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1956 {
1957 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1958 IsArgMinMaxSupported,
1959 data.m_Backends,
1960 isSupported,
1961 inputInfo0,
1962 outputInfo,
1963 descriptor);
1964 };
1965
1966 if(IsDynamicTensor(outputInfo))
1967 {
1968 isSupported = AreDynamicTensorsSupported();
1969 }
1970 else
1971 {
1972 validateFunc(outputInfo, isSupported);
1973 }
1974
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001975 if (!isSupported)
1976 {
1977 return false;
1978 }
1979
1980 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1981 assert(layer != nullptr);
1982
1983 input0.Connect(layer->GetInputSlot(0));
1984
Finn Williamsa4983ce2020-07-23 12:55:12 +01001985 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001986}
1987
1988template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001989 typename HalOperation = typename HalPolicy::Operation,
1990 typename HalModel = typename HalPolicy::Model>
1991bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001992{
Keith Davis6e4081f2020-09-03 13:17:21 +01001993 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001994 using HalOperandType = typename HalPolicy::OperandType;
1995
1996 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1997 if (operation.inputs.size() <= 1)
1998 {
1999 return Fail("%s: Operation has insufficient arguments", __func__);
2000 }
2001
2002 // Get inputs and outputs
2003 const std::size_t numInputTensors = operation.inputs.size() - 1;
2004
2005 int32_t concatDim;
2006 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
2007 {
2008 return Fail("%s: Operation has invalid inputs", __func__);
2009 }
2010
2011 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2012 if (!outputOperand)
2013 {
2014 return Fail("%s: Operation has no outputs", __func__);
2015 }
2016
Keith Davis6e4081f2020-09-03 13:17:21 +01002017 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2018 armnn::TensorShape outputShape = outputInfo.GetShape();
2019 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002020 //
2021 // handle negative concat dims along the lines of tensorflow as described here:
2022 // https://www.tensorflow.org/api_docs/python/tf/concat
2023 // "negative axis refers to axis + rank(values)-th dimension"
2024 //
2025 if (concatDim < 0)
2026 {
2027 concatDim += outputShape.GetNumDimensions();
2028 }
2029
2030 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2031 {
2032 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2033 }
2034
2035 std::vector<LayerInputHandle> inputHandles;
2036 std::vector<armnn::TensorShape> inputShapes;
2037
2038 inputHandles.reserve(numInputTensors);
2039 inputShapes.reserve(numInputTensors);
2040
Keith Davis6e4081f2020-09-03 13:17:21 +01002041 bool inputsHaveBeenReshaped = false;
2042 unsigned int tensorDimensionsAdded = 0;
Mike Kellyb8805202019-07-31 17:25:43 +01002043 for (uint32_t i = 0; i < numInputTensors; ++i)
2044 {
2045 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2046 if (!operand)
2047 {
2048 return Fail("%s: Operation has invalid inputs", __func__);
2049 }
2050
Teresa Charlin3b959602019-10-31 17:05:47 +00002051 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2052 if (!operandInputHandle.IsValid())
2053 {
2054 return Fail("%s: Operation has invalid inputs", __func__);
2055 }
Mike Kellyb8805202019-07-31 17:25:43 +01002056
Keith Davis6e4081f2020-09-03 13:17:21 +01002057 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002058 if (operandShape.GetNumDimensions() == 0)
2059 {
2060 return Fail("%s: Operands with rank 0 are not supported", __func__);
2061 }
2062
2063 if (RequiresReshape(operandShape))
2064 {
2065 inputsHaveBeenReshaped = true;
2066
2067 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2068
2069 // Expand the tensor to three dimensions
2070 if (operandShape.GetNumDimensions() == 2)
2071 {
2072 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2073 tensorDimensionsAdded = 1;
2074 }
2075 else
2076 {
2077 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2078 tensorDimensionsAdded = 2;
2079 }
2080
Kevin Mayaed08ac2019-12-12 16:33:31 +00002081 armnn::ReshapeDescriptor reshapeDescriptor;
2082 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2083
2084 bool isSupported = false;
2085 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2086 IsReshapeSupported,
2087 data.m_Backends,
2088 isSupported,
2089 operandInputHandle.GetTensorInfo(),
2090 reshapeInfo,
2091 reshapeDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002092
Kevin Mayaed08ac2019-12-12 16:33:31 +00002093 if (!isSupported)
2094 {
2095 return false;
2096 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002097 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002098
2099 // Point to the reshape operation rather then the input operation
Keith Davis6e4081f2020-09-03 13:17:21 +01002100 operandShape = reshapeInfo.GetShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002101 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2102 }
2103
2104 inputShapes.emplace_back(operandShape);
2105 inputHandles.emplace_back(operandInputHandle);
2106
2107 if (!inputHandles.back().IsValid())
2108 {
2109 return Fail("%s: Operation has invalid inputs", __func__);
2110 }
2111 }
2112
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002113 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
Mike Kellyb8805202019-07-31 17:25:43 +01002114
2115 if (inputsHaveBeenReshaped)
2116 {
2117 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2118 concatDim += tensorDimensionsAdded;
2119
2120 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2121 if (tensorDimensionsAdded == 1)
2122 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002123 if (IsDynamicTensor(outputInfo))
2124 {
2125 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2126 }
2127 else
2128 {
2129 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2130 }
Mike Kellyb8805202019-07-31 17:25:43 +01002131 }
2132 else if (tensorDimensionsAdded == 2)
2133 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002134 if (IsDynamicTensor(outputInfo))
2135 {
2136 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2137 }
2138 else
2139 {
2140 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2141 }
Mike Kellyb8805202019-07-31 17:25:43 +01002142 }
2143 }
2144
2145 // Check if permutations is required and get the pair of permutations required for the concatenation.
2146 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2147 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
Keith Davis6e4081f2020-09-03 13:17:21 +01002148 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
Keith Davis6e4081f2020-09-03 13:17:21 +01002149 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2150 concatDim,
2151 permutationPair);
Mike Kellyb8805202019-07-31 17:25:43 +01002152
Keith Davis6e4081f2020-09-03 13:17:21 +01002153 // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
2154 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002155 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002156 if (needPermute)
2157 {
2158 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2159 }
2160
2161 outputInfo.SetShape(outputShape);
Mike Kellyb8805202019-07-31 17:25:43 +01002162 }
Mike Kellyb8805202019-07-31 17:25:43 +01002163 // this is no-op for identity swizzles, otherwise it replaces both
2164 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002165 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002166 {
2167 return false;
2168 }
Mike Kellyb8805202019-07-31 17:25:43 +01002169
2170 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2171 armnn::OriginsDescriptor concatDescriptor;
2172
2173 try
2174 {
2175 // The concat descriptor is always created across the only supported concat dimension
2176 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002177 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2178 inputShapes.end(),
2179 concatDim);
2180 } catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002181 {
2182 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2183 }
2184
2185 // Validate the output shape is correct given the input shapes based on the
2186 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002187 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002188 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002189 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2190 {
2191 return Fail("%s: Error validating the output shape for concat", __func__);
2192 }
Mike Kellyb8805202019-07-31 17:25:43 +01002193 }
2194
2195 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2196 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
Keith Davis6e4081f2020-09-03 13:17:21 +01002197 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Mike Kellyb8805202019-07-31 17:25:43 +01002198
Keith Davis6e4081f2020-09-03 13:17:21 +01002199 bool isSupported = false;
2200 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
2201 FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
2202 outputInfo, concatDescriptor);
2203 };
2204
2205 if (!isDynamicTensor)
2206 {
2207 validateFunc(outputInfo, isSupported);
2208 }
2209 else
2210 {
2211 isSupported = AreDynamicTensorsSupported();
2212 }
2213
Mike Kellyb8805202019-07-31 17:25:43 +01002214 if (!isSupported)
2215 {
2216 return false;
2217 }
2218
2219 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2220 assert(layer != nullptr);
2221 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002222 // Connect inputs to the layer
2223 const int numInputSlots = layer->GetNumInputSlots();
2224 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2225 for (int i = 0; i < numInputSlots; ++i)
2226 {
2227 // connect the input directly to the merge (concat) layer
2228 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2229 }
2230
Keith Davis6e4081f2020-09-03 13:17:21 +01002231 // Transpose the output shape
2232 auto transposeOutputShape = [&](){
Mike Kelly4a956582020-02-28 10:32:09 +00002233 armnn::TransposeDescriptor transposeDesc;
2234 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002235 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2236 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2237 permutationPair.second);
Keith Davis6e4081f2020-09-03 13:17:21 +01002238 isSupported = false;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002239 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002240 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002241 data.m_Backends,
2242 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002243 inputTransposeInfo,
2244 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002245 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002246 if (!isSupported)
2247 {
2248 return false;
2249 }
Mike Kellyb8805202019-07-31 17:25:43 +01002250 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Keith Davis6e4081f2020-09-03 13:17:21 +01002251 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
Mike Kelly4a956582020-02-28 10:32:09 +00002252 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002253 layer = &deswizzleLayer;
Keith Davis6e4081f2020-09-03 13:17:21 +01002254
2255 return true;
2256 };
2257
2258 if (needPermute && !isDynamicTensor)
2259 {
2260 transposeOutputShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002261 }
2262
2263 if (inputsHaveBeenReshaped)
2264 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002265 if (isDynamicTensor)
2266 {
2267 // Infer the output shapes of concat if outputs are type 1 dynamic
David Monahan7f492ac2020-10-16 10:36:29 +01002268 ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
Keith Davis6e4081f2020-09-03 13:17:21 +01002269 if (!ValidateConcatOutputShape(inputShapes,
2270 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2271 concatDim))
2272 {
2273 return Fail("%s: Error validating the output shape for concat", __func__);
2274 }
2275 transposeOutputShape();
2276 }
2277
Mike Kellyb8805202019-07-31 17:25:43 +01002278 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Mike Kellyb8805202019-07-31 17:25:43 +01002279 // Undo the reshape knowing the amount of dimensions added
2280 if (tensorDimensionsAdded == 1)
2281 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002282 afterConcatInfo.SetShape(
2283 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002284 }
2285 else if (tensorDimensionsAdded == 2)
2286 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002287 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002288 }
2289
Kevin Mayaed08ac2019-12-12 16:33:31 +00002290 armnn::ReshapeDescriptor reshapeDescriptor;
2291 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
Keith Davis6e4081f2020-09-03 13:17:21 +01002292 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Kevin Mayaed08ac2019-12-12 16:33:31 +00002293
Keith Davis6e4081f2020-09-03 13:17:21 +01002294 isSupported = false;
2295 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2296 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2297 IsReshapeSupported,
2298 data.m_Backends,
2299 isSupported,
2300 concatInfo,
2301 afterConcatInfo,
2302 reshapeDescriptor);
2303 };
2304
2305 if (!IsDynamicTensor(afterConcatInfo))
2306 {
2307 validateReshapeFunc(afterConcatInfo, isSupported);
2308 }
2309 else
2310 {
2311 isSupported = AreDynamicTensorsSupported();
2312 }
2313
Kevin Mayaed08ac2019-12-12 16:33:31 +00002314 if (!isSupported)
2315 {
2316 return false;
2317 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002318 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
2319 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2320 0,
2321 *layer,
2322 model,
2323 data,
2324 nullptr,
2325 validateReshapeFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002326 }
2327
Keith Davis6e4081f2020-09-03 13:17:21 +01002328 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002329}
2330
// Converts an ANEURALNETWORKS_CONV_2D operation into an ArmNN Convolution2d layer.
//
// Handles both NNAPI signatures:
//   - 10 inputs: explicit padding (left/right/top/bottom), strides, fused activation (operand 9)
//   - 7 inputs:  implicit padding scheme (operand 3), strides, fused activation (operand 6)
// Weights (operand 1) and bias (operand 2) must be constant tensors; the data layout is NHWC.
// Returns true on success; otherwise reports the reason through Fail() and returns false.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Align the bias quantization scale with input scale * weights scale, as required by quantized conv.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 10-input form: operands 3..8 carry explicit padding and strides, operand 9 the fused activation.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 7-input form: operand 3 is an implicit padding scheme; pad values are derived below.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NNAPI CONV_2D weights are [depth_out, filter_height, filter_width, depth_in],
        // so height is dim 1 and width is dim 2; the NHWC input likewise uses dims 1/2.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    // For a dynamic output shape, validation is deferred to validateFunc, which
    // SetupAndTrackLayerOutputSlot runs once the output shape has been inferred.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // The fused activation (if any) is applied when the output slot is set up.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
2452
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002453template<typename HalPolicy,
2454 typename HalOperation = typename HalPolicy::Operation,
2455 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002456bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2457{
2458 using HalOperand = typename HalPolicy::Operand;
2459 using HalOperandType = typename HalPolicy::OperandType;
2460
2461 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2462 if (!input.IsValid() )
2463 {
2464 return Fail("%s: Operation has invalid inputs", __func__);
2465 }
2466
2467 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2468 unsigned int rank = inputInfo.GetNumDimensions();
2469 if (rank != 4)
2470 {
2471 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2472 }
2473
2474 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2475 if (!output)
2476 {
2477 return Fail("%s: Could not read output 0", __func__);
2478 }
2479
2480 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002481
2482 armnn::DepthToSpaceDescriptor descriptor;
2483
2484 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2485 if (descriptor.m_BlockSize <= 1)
2486 {
2487 return Fail("%s: Block size must be at least 1 in all dimensions");
2488 }
2489
2490 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002491 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002492 {
2493 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2494 }
2495
2496 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002497 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2498 {
2499 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2500 IsDepthToSpaceSupported,
2501 data.m_Backends,
2502 isSupported,
2503 inputInfo,
2504 outputInfo,
2505 descriptor);
2506 };
2507
2508 if(!IsDynamicTensor(outputInfo))
2509 {
2510 validateFunc(outputInfo, isSupported);
2511 }
2512 else
2513 {
2514 isSupported = AreDynamicTensorsSupported();
2515 }
2516
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002517 if (!isSupported)
2518 {
2519 return false;
2520 }
2521
2522 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2523 assert(layer != nullptr);
2524 input.Connect(layer->GetInputSlot(0));
2525
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002526 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002527}
2528
// Converts an ANEURALNETWORKS_DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer.
//
// Handles both NNAPI signatures:
//   - 11 inputs: explicit padding (left/right/top/bottom), strides, fused activation (operand 10)
//   - 8 inputs:  implicit padding scheme (operand 3), strides, fused activation (operand 7)
// Weights (operand 1) must be a constant [1, H, W, depth_out] tensor and bias (operand 2)
// a constant 1-D tensor; the data layout is NHWC.
// Returns true on success; otherwise reports the reason through Fail() and returns false.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // The layout for weights in depthwise is [ 1, H, W, O] and it's the same in ArmNN. No need to permute anything.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input scale * weights scale, as required by quantized conv.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // 11-input form: operands 3..8 carry explicit padding and strides, operand 10 the fused activation.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8-input form: operand 3 is an implicit padding scheme; pad values are derived below.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are [1, H, W, O]: height is dim 1, width is dim 2; the NHWC input likewise uses dims 1/2.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    // For a dynamic output shape, validation is deferred to validateFunc, which
    // SetupAndTrackLayerOutputSlot runs once the output shape has been inferred.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }


    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // The fused activation (if any) is applied when the output slot is set up.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
2675
Mike Kelly3c673942019-07-25 09:26:06 +01002676template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002677 typename HalOperation = typename HalPolicy::Operation,
2678 typename HalModel = typename HalPolicy::Model>
2679bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002680{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002681 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002682
2683 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2684 if (!input.IsValid())
2685 {
2686 return Fail("%s: Operation has invalid input", __func__);
2687 }
2688
Sadik Armagan98c0f662019-11-21 15:54:36 +00002689 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2690 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2691 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2692 {
2693 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2694 }
2695
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002696 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002697 if (!outputOperand)
2698 {
2699 return Fail("%s: Operation has invalid outputs", __func__);
2700 }
2701
2702 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002703
2704 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002705 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2706 {
2707 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2708 IsDequantizeSupported,
2709 data.m_Backends,
2710 isSupported,
2711 inputInfo,
2712 outputInfo);
2713 };
2714
2715 if(IsDynamicTensor(outputInfo))
2716 {
2717 isSupported = AreDynamicTensorsSupported();
2718 }
2719 else
2720 {
2721 validateFunc(outputInfo, isSupported);
2722 }
2723
Mike Kelly46272802019-08-14 17:00:48 +01002724 if (!isSupported)
2725 {
2726 return false;
2727 }
2728
2729 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2730 assert(layer != nullptr);
2731 input.Connect(layer->GetInputSlot(0));
2732
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002733 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002734}
2735
2736template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002737 typename HalOperation = typename HalPolicy::Operation,
2738 typename HalModel = typename HalPolicy::Model>
2739bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002740{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002741 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002742
2743 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2744 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2745
2746 if (!input0.IsValid() || !input1.IsValid())
2747 {
2748 return Fail("%s: Operation has invalid inputs", __func__);
2749 }
2750
2751 // The FuseActivation parameter is always the input index 2
2752 // and it should be optional
2753 ActivationFn activationFunction;
2754 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2755 {
2756 return Fail("%s: Operation has invalid inputs", __func__);
2757 }
2758
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002759 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002760 if (!output)
2761 {
2762 return Fail("%s: Could not read output 0", __func__);
2763 }
2764
2765 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002766
2767 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002768 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2769 {
2770 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2771 IsDivisionSupported,
2772 data.m_Backends,
2773 isSupported,
2774 input0.GetTensorInfo(),
2775 input1.GetTensorInfo(),
2776 outputInfo);
2777 };
2778
2779 if(!IsDynamicTensor(outputInfo))
2780 {
2781 validateFunc(outputInfo, isSupported);
2782 }
2783 else
2784 {
2785 isSupported = AreDynamicTensorsSupported();
2786 }
2787
Mike Kelly46272802019-08-14 17:00:48 +01002788 if (!isSupported)
2789 {
2790 return false;
2791 }
2792
2793 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01002794
Kevin Mayfcf2a152020-09-08 16:06:32 +01002795 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2796 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01002797 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01002798 return false;
Mike Kelly46272802019-08-14 17:00:48 +01002799 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01002800
2801 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2802 data, nullptr, validateFunc, activationFunction);
2803
Mike Kelly46272802019-08-14 17:00:48 +01002804}
2805
2806template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002807 typename HalOperation = typename HalPolicy::Operation,
2808 typename HalModel = typename HalPolicy::Model>
2809bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002810{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002811 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002812
2813 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2814 if (!input.IsValid())
2815 {
2816 return Fail("%s: Operation has invalid inputs", __func__);
2817 }
2818
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002819 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002820 if (!outputOperand)
2821 {
2822 return Fail("%s: Operation has invalid outputs", __func__);
2823 }
2824
2825 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002826
2827 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002828 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2829 {
2830 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2831 IsFloorSupported,
2832 data.m_Backends,
2833 isSupported,
2834 input.GetTensorInfo(),
2835 outputInfo);
2836 };
2837
2838 if(!IsDynamicTensor(outputInfo))
2839 {
2840 validateFunc(outputInfo, isSupported);
2841 }
2842 else
2843 {
2844 isSupported = AreDynamicTensorsSupported();
2845 }
2846
Mike Kelly46272802019-08-14 17:00:48 +01002847 if (!isSupported)
2848 {
2849 return false;
2850 }
2851
2852 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2853 assert(layer != nullptr);
2854 input.Connect(layer->GetInputSlot(0));
2855
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002856 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002857}
2858
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a V1_0 operand is never QSymm8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2863
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True if the HAL 1.2 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2872
#ifdef ARMNN_ANDROID_NN_V1_3

// True if the HAL 1.3 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2881
// Outcome of DequantizeIfRequired (defined below).
enum class DequantizeStatus
{
    SUCCESS,         // Weights came from a DEQUANTIZE op and were dequantized here.
    NOT_REQUIRED,    // Weights are already constant; nothing to do.
    INVALID_OPERAND  // The weights operand could not be read.
};

// (dequantized data buffer, buffer length in bytes, tensor info describing the data, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2890
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002891template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002892 typename HalOperation = typename HalPolicy::Operation,
2893 typename HalModel = typename HalPolicy::Model>
2894DequantizeResult DequantizeIfRequired(size_t operand_index,
2895 const HalOperation& operation,
2896 const HalModel& model,
2897 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002898{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002899 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002900
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002901 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002902 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002903 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002904 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002905 }
2906
2907 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2908 {
2909 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002910 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002911 }
2912
2913 const size_t weightsInputIndex = operation.inputs[operand_index];
2914
2915 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2916 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002917 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002918 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002919 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002920 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002921 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2922 {
2923 continue;
2924 }
2925
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002926 size_t outOpIndex = weightsInputIndex + 1;
2927 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002928 {
2929 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002930 }
2931
2932 if (outOpIndex != weightsInputIndex)
2933 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002934 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002935 }
2936
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002937 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002938 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002939
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002940 if (!IsQSymm8(*operand))
2941 {
2942 // Only supporting dequantize from QSYMM8 to FLOAT
2943 break;
2944 }
2945
2946 // Allocate a new buffer for the dequantized data and manually dequantize
2947 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2948 if (!startValue)
2949 {
2950 // Failed to get the operand address
2951 break;
2952 }
2953
2954 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2955 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002956 const float quantizationScale = operand->scale;
2957
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002958 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2959 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2960 {
2961 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002962 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002963 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2964 }
2965
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002966 // Construct tensor info for dequantized ConstTensor
2967 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2968 operand->dimensions.data(),
2969 armnn::DataType::Float32);
2970
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002971 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2972 std::move(tensorInfo),
2973 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002974 }
2975
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002976 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002977}
2978
2979template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002980 typename HalOperation = typename HalPolicy::Operation,
2981 typename HalModel = typename HalPolicy::Model>
2982ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2983 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002984 const ConversionData& data,
2985 size_t operandIndex,
2986 bool optional = false)
2987{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002988 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2989
2990 DequantizeStatus status = std::get<3>(dequantized);
2991 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002992 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002993 case DequantizeStatus::INVALID_OPERAND:
2994 {
2995 // return invalid const tensor pin
2996 return ConstTensorPin();
2997 }
2998 case DequantizeStatus::NOT_REQUIRED:
2999 {
3000 return ConvertOperationInputToConstTensorPin<HalPolicy>(
3001 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3002 }
3003 case DequantizeStatus::SUCCESS:
3004 default:
3005 {
3006 return ConstTensorPin(
3007 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3008 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003009 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003010}
3011
3012
// Converts an NNAPI FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = input tensor, 1 = weights, 2 = bias, 3 = fused activation function.
// Weights and bias are wired as layer input slots 1 and 2 (constant or not);
// inputs with rank > 2 are flattened through an inserted Reshape layer first.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    // If weights are constant a separate constant layer will be created to store data.
    // Otherwise handle non const weights as inputs.
    weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    // If bias are constant a separate constant layer will be created to store data.
    // Otherwise handle non const bias as inputs.
    biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Flatten the input shape to 2D to match the weights; throws on incompatible shapes.
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    // Support validation; deferred via validateFunc when the output shape is dynamic.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);

    // Rank > 2 inputs go through an extra Reshape layer to flatten them to 2D.
    if (inputInfo.GetNumDimensions() > 2U)
    {
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        assert(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
3155
3156template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003157 typename HalOperation = typename HalPolicy::Operation,
3158 typename HalModel = typename HalPolicy::Model>
3159bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003160{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003161 using HalOperand = typename HalPolicy::Operand;
3162
Mike Kelly999e2092019-08-15 10:46:46 +01003163 if (operation.inputs.size() != 1)
3164 {
3165 return Fail("%s: Optional inputs are not supported", __func__);
3166 }
3167
Mike Kelly46272802019-08-14 17:00:48 +01003168 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3169 if (!input.IsValid())
3170 {
3171 return Fail("%s: Operation has invalid inputs", __func__);
3172 }
3173
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003174 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003175 if (!output)
3176 {
3177 return Fail("%s: Could not read output 0", __func__);
3178 }
3179
3180 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3181 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3182
Mike Kelly46272802019-08-14 17:00:48 +01003183 if (outputInfo.GetNumDimensions() != 4u)
3184 {
3185 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3186 }
3187
3188 armnn::L2NormalizationDescriptor desc;
3189 desc.m_DataLayout = armnn::DataLayout::NHWC;
3190
3191 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003192 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3193 {
3194 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3195 IsL2NormalizationSupported,
3196 data.m_Backends,
3197 isSupported,
3198 inputInfo,
3199 outputInfo,
3200 desc);
3201 };
3202
3203 if(!IsDynamicTensor(outputInfo))
3204 {
3205 validateFunc(outputInfo, isSupported);
3206 }
3207 else
3208 {
3209 isSupported = AreDynamicTensorsSupported();
3210 }
3211
Mike Kelly46272802019-08-14 17:00:48 +01003212 if (!isSupported)
3213 {
3214 return false;
3215 }
3216
3217 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3218 assert(layer != nullptr);
3219 input.Connect(layer->GetInputSlot(0));
3220
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003221 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003222}
3223
// Converts an NNAPI LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel LocalBrightness, NHWC, rank-4 only).
// Inputs: 0 = tensor, 1 = radius (INT32), 2 = bias k, 3 = alpha, 4 = beta.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // Exactly five inputs expected; the optional axis input is rejected.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius/k/alpha/beta straight into the descriptor fields; the
    // short-circuit chain stops at the first unreadable input.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    // Backend support check; deferred via validateFunc for dynamic outputs.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3310
3311template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003312 typename HalOperation = typename HalPolicy::Operation,
3313 typename HalModel = typename HalPolicy::Model>
3314bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003315{
Mike Kelly46272802019-08-14 17:00:48 +01003316 armnn::ActivationDescriptor desc;
3317 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3318
3319 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3320}
3321
// Converts an NNAPI MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor, 1 = axis tensor (INT32), 2 = keep_dims flag (INT32).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank wraps negative axis values into [0, rank).
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    // Backend support check; deferred via validateFunc for dynamic outputs.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3407
// Converts an NNAPI MUL operation into an ArmNN Multiplication layer.
// Inputs: 0 and 1 = operand tensors (broadcast if shapes differ),
// 2 = optional fused activation function.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    // Backend support check; deferred via validateFunc for dynamic outputs.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMultiplicationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();

    // BroadcastTensor connects both inputs to startLayer, inserting reshape
    // layers if the input ranks differ; it can itself fail support checks.
    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
3477
// Converts an NNAPI PAD operation into an ArmNN Pad layer.
// Inputs: 0 = tensor, 1 = paddings tensor (consumed by ConvertPaddings).
// NOTE(review): takes `operation` by non-const reference, unlike the sibling
// Convert* functions - confirm whether this is intentional.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
    // the scale and zeroPoint must be the same as input0
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Backend support check; deferred via validateFunc for dynamic outputs.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3550
// Converts an NNAPI RESHAPE operation into an ArmNN Reshape layer.
// Inputs: 0 = tensor, 1 = 1-D INT32 tensor with the requested shape
// (may contain -1, resolved by the NNAPI reshapePrepare() utility).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    // Backend support check; deferred via validateFunc for dynamic outputs.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3635
3636template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003637 typename HalOperation = typename HalPolicy::Operation,
3638 typename HalModel = typename HalPolicy::Model>
3639bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003640{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003641 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003642
Mike Kelly0a879362019-07-29 16:56:31 +01003643 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3644 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3645
3646 if (!input0.IsValid() || !input1.IsValid())
3647 {
3648 return Fail("%s: Operation has invalid inputs", __func__);
3649 }
3650
3651 // The FuseActivation parameter is always the input index 2
3652 // and it should be optional
3653 ActivationFn activationFunction;
3654 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3655 {
3656 return Fail("%s: Operation has invalid inputs", __func__);
3657 }
3658
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003659 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003660 if (!output)
3661 {
3662 return Fail("%s: Could not read output 0", __func__);
3663 }
3664
3665 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003666
3667 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003668 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3669 {
3670 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3671 IsSubtractionSupported,
3672 data.m_Backends,
3673 isSupported,
3674 input0.GetTensorInfo(),
3675 input1.GetTensorInfo(),
3676 outputInfo);
3677 };
3678
3679 if(IsDynamicTensor(outputInfo))
3680 {
3681 isSupported = AreDynamicTensorsSupported();
3682 }
3683 else
3684 {
3685 validateFunc(outputInfo, isSupported);
3686 }
3687
Mike Kelly0a879362019-07-29 16:56:31 +01003688 if (!isSupported)
3689 {
3690 return false;
3691 }
3692
3693 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
Mike Kelly0a879362019-07-29 16:56:31 +01003694
Kevin Mayfcf2a152020-09-08 16:06:32 +01003695 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3696 if (!isReshapeSupported)
Mike Kelly0a879362019-07-29 16:56:31 +01003697 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003698 return false;
Mike Kelly0a879362019-07-29 16:56:31 +01003699 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003700 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3701 data, nullptr, validateFunc, activationFunction);
Mike Kelly0a879362019-07-29 16:56:31 +01003702}
3703
Finn Williams23b87b32019-07-30 11:44:05 +01003704template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003705 typename HalOperation = typename HalPolicy::Operation,
3706 typename HalModel = typename HalPolicy::Model>
3707bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003708{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003709 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003710
3711 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3712 if (!input.IsValid())
3713 {
3714 return Fail("%s: Operation has invalid inputs", __func__);
3715 }
3716
3717 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3718 unsigned int rank = inputInfo.GetNumDimensions();
3719 if (rank > 4)
3720 {
3721 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3722 }
3723
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003724 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003725 if (!output)
3726 {
3727 return Fail("%s: Could not read output 0", __func__);
3728 }
Sadik Armagan346e8112020-09-02 09:55:14 +01003729
3730 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
Mike Kelly46272802019-08-14 17:00:48 +01003731 {
3732 return Fail("%s: Dynamic output tensors are not supported", __func__);
3733 }
3734
3735 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3736 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003737 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003738
3739 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3740
3741 std::vector<int32_t> axis;
3742 if (!axisOperand)
3743 {
3744 axis.assign(dimensionSequence,
3745 dimensionSequence + rank);
3746 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003747 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003748 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003749 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003750 }
3751
3752 std::vector<uint32_t> outputDims;
3753 for (unsigned int i = 0; i < rank; i++)
3754 {
3755 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3756 auto currentDimension = inputInfo.GetShape()[i];
3757 if (skipSqueeze || currentDimension != 1)
3758 {
3759 outputDims.push_back(currentDimension);
3760 }
3761 }
3762
3763 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3764
3765 armnn::TensorInfo outputInfo = inputInfo;
3766 outputInfo.SetShape(outShape);
3767
3768 armnn::ReshapeDescriptor reshapeDesc;
3769 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3770
3771 bool isSupported = false;
3772 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3773 IsReshapeSupported,
3774 data.m_Backends,
3775 isSupported,
3776 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003777 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003778 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003779
Mike Kelly46272802019-08-14 17:00:48 +01003780 if (!isSupported)
3781 {
3782 return false;
3783 }
3784
3785 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3786 assert(layer != nullptr);
3787 input.Connect(layer->GetInputSlot(0));
3788
3789 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3790}
3791
3792template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003793 typename HalOperation = typename HalPolicy::Operation,
3794 typename HalModel = typename HalPolicy::Model>
3795bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003796{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003797 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003798
3799 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3800 if (!input.IsValid())
3801 {
3802 return Fail("%s: Operation has invalid inputs", __func__);
3803 }
3804
3805 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3806 unsigned int rank = inputInfo.GetNumDimensions();
3807 if (rank > 4)
3808 {
3809 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3810 }
3811
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003812 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003813 if (!output)
3814 {
3815 return Fail("%s: Could not read output 0", __func__);
3816 }
3817
3818 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003819
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003820 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3821 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3822 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003823
3824 std::vector<int32_t> beginValues;
3825 std::vector<int32_t> endValues;
3826 std::vector<int32_t> stridesValues;
3827
3828 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003829 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003830 {
3831 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3832 {
3833 return false;
3834 }
3835
3836 if (operandValues.size() != rank)
3837 {
3838 return false;
3839 }
3840
3841 return true;
3842 };
3843
3844 if (!ValidateInputOperands(*beginOperand, beginValues)
3845 || !ValidateInputOperands(*endOperand, endValues)
3846 || !ValidateInputOperands(*stridesOperand, stridesValues))
3847 {
3848 return Fail("%s: Operation has invalid input operand", __func__);
3849 }
3850
3851 // Stride cannot have value '0'
3852 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3853 {
3854 return Fail("%s: Stride must be non-zero value.", __func__);
3855 }
3856
3857 armnn::StridedSliceDescriptor descriptor;
3858 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3859 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3860 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3861 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3862
3863 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3864 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3865 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3866 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3867 {
3868 return Fail("%s: Operation has invalid inputs", __func__);
3869 }
3870
3871 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003872 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3873 {
3874 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3875 IsStridedSliceSupported,
3876 data.m_Backends,
3877 isSupported,
3878 inputInfo,
3879 outputInfo,
3880 descriptor);
3881 };
3882
3883 if(IsDynamicTensor(outputInfo))
3884 {
3885 isSupported = AreDynamicTensorsSupported();
3886 }
3887 else
3888 {
3889 validateFunc(outputInfo, isSupported);
3890 }
3891
Mike Kelly46272802019-08-14 17:00:48 +01003892 if (!isSupported)
3893 {
3894 return false;
3895 }
3896
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003897 // Check if slice can fit in a inferred output
3898 armnn::TensorShape inputShape = inputInfo.GetShape();
3899 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3900 {
3901 int stride = descriptor.m_Stride[i];
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003902
3903 if (descriptor.m_ShrinkAxisMask & (1 << i))
3904 {
3905 // If the difference between the start point and the end point of the slice on an axis being shrunk
3906 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3907 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3908 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3909 {
3910 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3911 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003912
3913 if(stride < 0)
3914 {
3915 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3916 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003917 }
3918 }
3919
Mike Kelly46272802019-08-14 17:00:48 +01003920 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3921 assert(layer != nullptr);
3922 input.Connect(layer->GetInputSlot(0));
3923
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003924 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003925}
3926
3927template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003928 typename HalOperation = typename HalPolicy::Operation,
3929 typename HalModel = typename HalPolicy::Model>
3930bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003931{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003932 using HalOperand = typename HalPolicy::Operand;
Kevin May81f27fd2020-08-20 10:22:53 +01003933 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01003934
3935 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3936 if (!input.IsValid())
3937 {
3938 return Fail("%s: Operation has invalid inputs", __func__);
3939 }
3940
3941 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3942 unsigned int rank = inputInfo.GetNumDimensions();
3943 if (rank > 4)
3944 {
3945 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3946 }
3947
3948 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3949 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003950 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003951
3952 std::vector<int32_t> perm(rank);
Kevin May81f27fd2020-08-20 10:22:53 +01003953 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01003954 {
Mike Kelly46272802019-08-14 17:00:48 +01003955 for (unsigned int i = rank; i > 0; i--)
3956 {
Matthew Sloyan9b088d92020-09-14 15:12:55 +01003957 perm[rank - i] = armnn::numeric_cast<int> (i - 1);
Mike Kelly46272802019-08-14 17:00:48 +01003958 }
3959 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003960 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003961 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003962 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003963 }
3964
3965 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3966
Mike Kelly4a956582020-02-28 10:32:09 +00003967 armnn::TransposeDescriptor transposeDesc;
3968 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003969
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003970 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003971 if (!output)
3972 {
3973 return Fail("%s: Could not read output 0", __func__);
3974 }
3975
3976 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3977
3978 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003979 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3980 {
3981 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3982 IsTransposeSupported,
3983 data.m_Backends,
3984 isSupported,
3985 inputInfo,
3986 outputInfo,
3987 transposeDesc);
3988 };
3989
3990 if(IsDynamicTensor(outputInfo))
3991 {
3992 isSupported = AreDynamicTensorsSupported();
3993 }
3994 else
3995 {
3996 validateFunc(outputInfo, isSupported);
3997 }
3998
Mike Kelly46272802019-08-14 17:00:48 +01003999 if (!isSupported)
4000 {
4001 return false;
4002 }
4003
Mike Kelly4a956582020-02-28 10:32:09 +00004004 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01004005 assert(layer != nullptr);
4006 input.Connect(layer->GetInputSlot(0));
4007
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004008 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01004009}
4010
4011template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01004012 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01004013 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01004014 typename HalModel = typename HalPolicy::Model>
4015bool ConvertBatchToSpaceNd(const HalOperation& operation,
4016 const HalModel& model,
4017 ConversionData& data)
4018{
Finn Williams23b87b32019-07-30 11:44:05 +01004019
4020 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4021 if (!input.IsValid())
4022 {
4023 return Fail("%s: Operation has invalid inputs", __func__);
4024 }
4025
4026 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4027 if (!output)
4028 {
4029 return Fail("%s: Could not read output 0", __func__);
4030 }
4031
4032 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01004033
4034 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4035 if (!blockOperand)
4036 {
4037 return Fail("%s: Could not read input 1", __func__);
4038 }
4039
4040 // Convert the block operand to int32
4041 std::vector<int32_t> block;
4042 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
4043 {
4044 return Fail("%s: Input 1 has invalid values", __func__);
4045 }
4046
4047 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4048
4049 unsigned int rank = inputInfo.GetNumDimensions();
4050 if (rank != 4)
4051 {
4052 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
4053 }
4054
4055 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4056 {
4057 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4058 " greater than or equal to 1", __func__);
4059 }
4060
4061 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4062 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4063 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4064
Kevin May42477c12020-03-26 13:34:14 +00004065 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004066 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004067 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004068 }
4069 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4070 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4071
4072 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004073 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4074 {
4075 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4076 IsBatchToSpaceNdSupported,
4077 data.m_Backends,
4078 isSupported,
4079 inputInfo,
4080 outputInfo,
4081 batchToSpaceNdDesc);
4082 };
4083
4084 if(!IsDynamicTensor(outputInfo))
4085 {
4086 validateFunc(outputInfo, isSupported);
4087 }
4088 else
4089 {
4090 isSupported = AreDynamicTensorsSupported();
4091 }
4092
4093
Finn Williams23b87b32019-07-30 11:44:05 +01004094 if (!isSupported)
4095 {
4096 return false;
4097 }
4098
4099 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
4100 assert(layer != nullptr);
4101 input.Connect(layer->GetInputSlot(0));
4102
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004103 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004104}
Mike Kelly0a879362019-07-29 16:56:31 +01004105
Finn Williamsd74c5052019-07-30 17:06:00 +01004106template<typename HalPolicy,
4107 typename HalOperation = typename HalPolicy::Operation,
4108 typename HalOperand = typename HalPolicy::Operand,
4109 typename HalModel = typename HalPolicy::Model>
4110bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4111{
4112 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4113 if (!input.IsValid())
4114 {
4115 return Fail("%s: Operation has invalid inputs", __func__);
4116 }
4117
4118 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4119 unsigned int rank = inputInfo.GetNumDimensions();
4120 unsigned int spatialDim = rank - 2;
4121
4122 if (rank != 4)
4123 {
4124 Fail("%s: Only inputs with rank 4 are supported", __func__);
4125 }
4126
4127 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4128 if (!output)
4129 {
4130 return Fail("%s: Could not read output 0", __func__);
4131 }
4132
4133 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004134
4135 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4136 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4137
4138 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4139 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4140 {
4141 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4142 }
4143
4144 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004145 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4146 {
4147 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4148 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004149 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4150 {
4151 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4152 }
4153
4154 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4155 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4156 {
4157 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4158 }
4159
4160 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4161 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004162 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4163 {
4164 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4165 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004166 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4167 {
4168 int paddingBeforeInput = paddings[i];
4169 int paddingAfterInput = paddings[i + 1];
4170 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4171 {
4172 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4173 }
4174
4175 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4176 }
4177
4178 armnn::SpaceToBatchNdDescriptor descriptor;
4179 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4180 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4181 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4182
Kevin May42477c12020-03-26 13:34:14 +00004183 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004184 {
4185 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4186 }
4187
4188 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004189 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4190 {
4191 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4192 IsSpaceToBatchNdSupported,
4193 data.m_Backends,
4194 isSupported,
4195 inputInfo,
4196 outputInfo,
4197 descriptor);
4198 };
4199
4200 if(IsDynamicTensor(outputInfo))
4201 {
4202 isSupported = AreDynamicTensorsSupported();
4203 }
4204 else
4205 {
4206 validateFunc(outputInfo, isSupported);
4207 }
4208
Finn Williamsd74c5052019-07-30 17:06:00 +01004209 if (!isSupported)
4210 {
4211 return false;
4212 }
4213
4214 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4215 assert(layer != nullptr);
4216 input.Connect(layer->GetInputSlot(0));
4217
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004218 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004219}
4220
saoste01b8471482018-10-10 09:44:51 +01004221} // namespace armnn_driver