blob: ca314e2b414c99cf8b679bb457d0a46e555e4bc7 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/BackendHelper.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000012#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan9b088d92020-09-14 15:12:55 +010013#include <armnn/utility/NumericCast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010014
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000015#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000016#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010017
Mike Kelly46272802019-08-14 17:00:48 +010018#include "1.0/FullyConnected.hpp"
19
arovir01b0717b52018-09-05 17:03:25 +010020#include <ActivationFunctor.h>
21#include <CpuExecutor.h>
22#include <OperationsUtils.h>
23
James Ward4e22f602020-10-20 15:50:33 +010024#include <armnnUtils/FloatingPointComparison.hpp>
arovir01b0717b52018-09-05 17:03:25 +010025
26#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010027#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010028
29namespace armnn_driver
30{
31
32///
33/// Helper classes
34///
35
Kevin Mayec1e5b82020-02-26 17:00:39 +000036#ifdef ARMNN_ANDROID_R
37using OperandType = android::nn::hal::OperandType;
38#endif
39
Sadik Armagan188675f2021-02-12 17:16:42 +000040#ifdef ARMNN_ANDROID_S
41#include <nnapi/Types.h>
42#endif
43
44
/// State shared by all conversion steps while an AndroidNN model is
/// translated into an armnn::INetwork.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
        , m_DynamicInputsEncountered(false)
    {}

    // Backends (in preference order) queried for layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // Network under construction; starts out null.
    armnn::INetworkPtr m_Network;
    // Output slot that produces each operand's data (presumably indexed by
    // operand index — confirm against the population site in the .cpp).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing operand data for the model being converted.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    // Set when at least one input with dynamic dimensions has been encountered.
    bool m_DynamicInputsEncountered;
};
59
/// Wraps an armnn::IOutputSlot (the producer of an operand's data) together
/// with its TensorInfo and a validity flag, for use while wiring up layers.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// True if this handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    /// Disconnects the wrapped output slot from the given input slot.
    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    // NOTE(review): implementation lives in the .cpp — presumably aligns this
    // handle's quantization scale with the given weight/input handles; confirm.
    void SanitizeQuantizationScale(LayerInputHandle& weight,
                                   LayerInputHandle& input);

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
82
/// Holds (or references) the data of a constant tensor operand and exposes it
/// as an armnn::ConstTensor. Move-only: copying is deleted.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data (see m_SwizzledTensorData below).
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
117
118} // namespace armnn_driver
119
120///
121/// Utility functions
122///
123
124namespace
125{
126
127using namespace armnn_driver;
128using namespace android::nn;
129
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string passed straight to ALOGD.
// @param args      Arguments for the format string.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
138
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order and sets 'supported' to the result of calling
// layerSupportObject.func(...) for each registered backend, stopping at the first
// backend that reports support. Unsupported/unregistered backends are logged via
// ALOGD. An InvalidArgumentException thrown by the support check is rethrown with
// extra context and the call-site location.
// (No comments inside the macro body: '//' would swallow the line continuations.)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100182
// Builds an armnn::TensorShape from a HAL operand's 'dimensions' array.
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
188
Matthew Bentham912b3622019-05-03 15:49:14 +0100189inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100190{
Matthew Bentham912b3622019-05-03 15:49:14 +0100191 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
192 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
193 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100194}
195
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
213
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    switch (type)
    {
        case V1_3::OperandType::BOOL:
        case V1_3::OperandType::TENSOR_BOOL8:
        case V1_3::OperandType::TENSOR_FLOAT16:
        case V1_3::OperandType::TENSOR_FLOAT32:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
        case V1_3::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
232
// HAL 1.0 has no BOOL operand type, so this overload always answers false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
237
// A HAL 1.0 operand is never a 1.2-or-later operand.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
242
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True when the HAL 1.2 operand is of type BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif
257
#ifdef ARMNN_ANDROID_NN_V1_3

// True when the HAL 1.3 operand is of type BOOL.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if a operand is a 1.2-or-later Operand (1.3 qualifies).
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
272
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000274armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
275 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100276 armnn::TensorInfo reshapeInfo)
277{
278 armnn::ReshapeDescriptor reshapeDescriptor;
279 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
280
281 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +0100282 if (!reshapeLayer)
283 {
284 throw armnn::RuntimeException("ReshapeLayer is null");
285 }
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100286
287 // Attach the input layer to the reshape layer
288 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
289 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
290
291 return *reshapeLayer;
292}
293
// Connects two inputs to a binary (two-input) layer, inserting a reshape in
// front of the lower-rank input so both ranks match (degenerate leading 1s),
// i.e. NumPy-style broadcasting of the smaller tensor.
// @param input0/input1 The two operand handles to connect.
// @param startLayer    Layer receiving the (possibly reshaped) inputs; must not be null.
// @param data          Conversion state (backends for support checks, network under construction).
// @return false if the required reshape is unsupported by every backend, true otherwise.
// @throws armnn::RuntimeException if startLayer or the network is null.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Build the padded shape: leading dimensions are 1, trailing ones copy the small shape.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0   Input1
        //     |       |
        //  Reshape    |
        //      \     /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0   Input1
        //     |       |
        //     |    Reshape
        //      \     /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
393
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000394void CalcPadding(uint32_t input,
395 uint32_t kernel,
396 uint32_t stride,
397 uint32_t& outPadHead,
398 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100399 android::nn::PaddingScheme scheme)
400{
401 int32_t padHead;
402 int32_t padTail;
403 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
Matthew Sloyan9b088d92020-09-14 15:12:55 +0100404 outPadHead = armnn::numeric_cast<uint32_t>(padHead);
405 outPadTail = armnn::numeric_cast<uint32_t>(padTail);
arovir01b0717b52018-09-05 17:03:25 +0100406}
407
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// As CalcPadding above, but also accounting for kernel dilation (HAL 1.2+).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

// Computes padding for transpose convolution from the desired *output* size.
// Note the pads stay signed int32_t here (no cast to unsigned).
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
427
// Converts a HAL 1.0 operand description into an AndroidNN Shape
// (type, dimensions and quantization scale/offset).
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
437
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// HAL 1.2 overload of GetOperandShape; same field mapping as the 1.0 version.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
451
#ifdef ARMNN_ANDROID_NN_V1_3

// HAL 1.3 overload of GetOperandShape; same field mapping as the 1.0 version.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
465
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
// @param biasInfo   Bias tensor info to fix up (modified in place).
// @param weightInfo Weight tensor info (per-tensor or per-axis quantized).
// @param inputInfo  Input tensor info providing the input scale.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Silently correct small mismatches (within 1%); larger ones are left untouched.
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
505
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// 3D identity (used when no permute is needed but only 3 dims exist).
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// Swaps the last two dimensions of a 4D tensor.
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100514
515template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000516armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
517 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100518{
519 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000520 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
Mike Kellye2d611e2021-10-14 12:35:58 +0100521 if (!layer)
522 {
523 throw armnn::RuntimeException("TransposeLayer is null");
524 }
arovir01b0717b52018-09-05 17:03:25 +0100525 // Connect input to swizzle layer
526 input.Connect(layer->GetInputSlot(0));
527
528 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000529 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100530 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
531
532 return *layer;
533}
534
arovir01b0717b52018-09-05 17:03:25 +0100535bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
536 const armnn::TensorShape & outputShape,
537 uint32_t concatDim)
538{
539 // Validate the output shape is correct given the input shapes (which have just been validated)
540 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
541 if (outputShape.GetNumDimensions() != numDimensions)
542 {
543 return Fail("%s: Output shape has wrong number of dimensions", __func__);
544 }
545
546 unsigned int outputSizeAlongConcatenatedDimension = 0;
547 for (unsigned int i = 0; i < inputShapes.size(); i++)
548 {
549 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
550 }
551
552 for (unsigned int i = 0; i < numDimensions; ++i)
553 {
554 if (i == concatDim)
555 {
556 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
557 {
558 return Fail(
559 "%s: Invalid output shape for dimension %d (%d != %d)",
560 __func__,
561 i,
562 outputShape[i],
563 outputSizeAlongConcatenatedDimension);
564 }
565 }
566 else
567 {
568 if (outputShape[i] != inputShapes[0][i])
569 {
570 return Fail("%s: Invalid output shape", __func__);
571 }
572 }
573 }
574
575 return true;
576}
577
// True when the tensor has fewer than 3 dimensions (callers use this to
// decide whether a reshape is needed before further processing).
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
582
arovir01b0717b52018-09-05 17:03:25 +0100583void SwizzleInputs(armnn::INetwork& network,
584 std::vector<LayerInputHandle>& inputs,
585 std::vector<armnn::TensorShape>& inputShapes,
586 const armnn::PermutationVector& mapping)
587{
588 if (!mapping.IsEqual(IdentityPermutation4D))
589 {
590 size_t nInputs = inputs.size();
591 for (size_t i=0; i<nInputs; ++i)
592 {
593 // add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000594 armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
arovir01b0717b52018-09-05 17:03:25 +0100595 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
596 auto& outputInfo = outputSlot.GetTensorInfo();
597 // replace inputs with the swizzled ones
598 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
599 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
600 }
601 }
602}
603
// Checks that a transpose with the given mapping is supported by a backend for
// every input and, if so, inserts the transpose layers via SwizzleInputs.
// Identity mappings (3D or 4D) require no work and succeed immediately.
// @return false if any input's transpose is unsupported, true otherwise.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        // All inputs passed the support check; now actually insert the layers.
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
639
640
// Decides whether concatenation inputs must be permuted so Compute Library
// subtensors can be used, and if so which permutation pair (forward, inverse)
// to apply; may also rewrite concatDimension to the post-permute dimension.
// @param numberOfDimensions Rank of the tensors being concatenated (must be >= 3).
// @param concatDimension    In/out: concat axis, updated when a permute is chosen.
// @param permutationPair    Out: (forward, inverse) permutation to apply.
// @return true if a (non-identity) permutation is required; false otherwise
//         (including the invalid rank < 3 case, which is logged via Fail).
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
675
676} // anonymous namespace
677
678namespace armnn_driver
679{
680
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
691
692} // namespace armnn_driver
693
694///
695/// Utility templates
696///
697
698namespace armnn_driver
699{
700
701using namespace android::nn;
702
// Fetches the operand referenced by the operation's inputIndex-th input.
// @param failOnIndexOutOfBounds When true, an out-of-range inputIndex is logged
//                               as a failure (it returns nullptr either way).
// @return Pointer to the operand, or nullptr if either index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
        return nullptr;
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
730
/// Looks up the operand receiving output @p outputIndex of @p operation.
/// @return pointer to the operand inside the main model, or nullptr on any invalid index.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i",
             __func__, outputIndex, static_cast<int>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    // BUGFIX: the operand index must come from operation.outputs, not operation.inputs —
    // the old check validated the wrong array and could let an out-of-range output
    // operand index through (or reject a valid one).
    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i",
             __func__, operation.outputs[outputIndex],
             static_cast<int>(getMainModel(model).operands.size()));
        return nullptr;
    }
    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
753
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100754template<typename HalPolicy,
Pablo Tellofb45e2f2019-10-18 16:51:57 +0100755 typename HalOperand = typename HalPolicy::Operand,
756 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100757const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100758 const HalModel& model,
759 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000760 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100761{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100762 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100763
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100764 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100765 switch (operand.lifetime)
766 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100767 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100768 {
769 // Constant found in model.operandValues
770 valueStart = &model.operandValues[operand.location.offset];
771 break;
772 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100773 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100774 {
775 // Constant specified via a Memory object
776 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
777 break;
778 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100779 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000780 {
781 // An optional input tensor with no values is not an error so should not register as a fail
782 if (optional)
783 {
784 valueStart = nullptr;
785 break;
786 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100787 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000788 }
arovir01b0717b52018-09-05 17:03:25 +0100789 default:
790 {
791 // Unsupported/invalid (e.g. can't get value of an input to the model)
792 Fail("%s: unsupported/invalid operand lifetime: %s",
793 __func__, toString(operand.lifetime).c_str());
794 valueStart = nullptr;
795 }
796 }
797
798 return valueStart;
799}
800
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100801template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100802 typename HalOperation = typename HalPolicy::Operation,
803 typename HalModel = typename HalPolicy::Model,
804 typename HalOperandType = typename HalPolicy::OperandType>
805bool GetOperandType(const HalOperation& operation,
806 uint32_t inputIndex,
807 const HalModel& model,
808 HalOperandType& type)
809{
810 using HalOperand = typename HalPolicy::Operand;
811
812 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
813 if (!operand)
814 {
815 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
816 }
817
818 type = operand->type;
819 return true;
820}
821
/// Tells whether an operand's data is fixed at model-build time.
/// NO_VALUE (an omitted optional input) is deliberately counted as constant here,
/// matching how optional const-tensor inputs are handled downstream.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
834
835template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100836 typename HalOperand = typename HalPolicy::Operand,
837 typename HalModel = typename HalPolicy::Model>
838ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
839 const HalModel& model,
840 const ConversionData& data,
841 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
842 const armnn::TensorShape* overrideTensorShape = nullptr,
843 bool optional = false)
844{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100845 if (!IsOperandTypeSupportedForTensors(operand.type))
846 {
847 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
848 return ConstTensorPin();
849 }
850
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000851 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100852 {
853 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
854 return ConstTensorPin();
855 }
856
857 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
858 if (!valueStart)
859 {
860 if (optional)
861 {
862 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
863 return ConstTensorPin(true);
864 }
865 // mandatory tensor with no values
866 Fail("%s: failed to get operand address", __func__);
867 return ConstTensorPin();
868 }
869
870 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Teresa Charlin02dce092019-11-11 17:06:23 +0000871
Matthew Sloyan29cc9612021-07-16 10:21:12 +0100872 // Make sure isConstant flag is set.
873 tensorInfo.SetConstant();
874
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100875 if (overrideTensorShape != nullptr)
876 {
877 tensorInfo.SetShape(*overrideTensorShape);
878 }
879 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
880}
881
882template<typename HalPolicy,
883 typename HalOperation = typename HalPolicy::Operation,
884 typename HalModel = typename HalPolicy::Model>
885ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
886 uint32_t inputIndex,
887 const HalModel& model,
888 const ConversionData& data,
889 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
890 const armnn::TensorShape* overrideTensorShape = nullptr,
891 bool optional = false)
892{
893 using HalOperand = typename HalPolicy::Operand;
894
895 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
896 if (!operand)
897 {
898 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
899 return ConstTensorPin();
900 }
901 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
902 model,
903 data,
904 dimensionMappings,
905 overrideTensorShape,
906 optional);
907}
908
909template<typename HalPolicy,
910 typename OutputType,
911 typename HalOperandType = typename HalPolicy::OperandType,
912 typename HalOperation = typename HalPolicy::Operation,
913 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100914bool GetInputScalar(const HalOperation& operation,
915 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100916 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100917 OutputType& outValue,
918 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100919 const ConversionData& data,
920 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100921{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100922 using HalOperand = typename HalPolicy::Operand;
923
924 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100925 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100926 {
927 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
928 }
929
Sadik Armagan813f2302020-05-19 14:10:30 +0100930 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100931 {
932 return Fail("%s: unexpected operand type: %s (should be %s)",
933 __func__, toString(operand->type).c_str(), toString(type).c_str());
934 }
935
Sadik Armagan813f2302020-05-19 14:10:30 +0100936 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100937 {
938 return Fail("%s: incorrect operand location length: %i (should be %i)",
939 __func__, operand->location.length, sizeof(OutputType));
940 }
941
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100942 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100943 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100944 {
945 return Fail("%s: failed to get address for operand", __func__);
946 }
947
Sadik Armagan813f2302020-05-19 14:10:30 +0100948 if(!optional)
949 {
950 outValue = *(static_cast<const OutputType*>(valueAddress));
951 }
952
arovir01b0717b52018-09-05 17:03:25 +0100953 return true;
954}
955
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100956template<typename HalPolicy,
957 typename HalOperation = typename HalPolicy::Operation,
958 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100959bool GetInputInt32(const HalOperation& operation,
960 uint32_t inputIndex,
961 int32_t& outValue,
962 const HalModel& model,
963 const ConversionData& data)
964{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100965 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100966}
967
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100968template<typename HalPolicy,
969 typename HalOperation = typename HalPolicy::Operation,
970 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100971bool GetInputFloat32(const HalOperation& operation,
972 uint32_t inputIndex,
973 float& outValue,
974 const HalModel& model,
975 const ConversionData& data)
976{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100977 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100978}
979
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100980template<typename HalPolicy,
981 typename HalOperation = typename HalPolicy::Operation,
982 typename HalOperandType = typename HalPolicy::OperandType,
983 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100984bool GetInputActivationFunctionImpl(const HalOperation& operation,
985 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100986 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100987 ActivationFn& outActivationFunction,
988 const HalModel& model,
989 const ConversionData& data)
990{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100991 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100992 {
993 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
994 __func__,
995 toString(type).c_str(),
Sadik Armagan188675f2021-02-12 17:16:42 +0000996 toString(HalOperandType::INT32).c_str(),
997 toString(HalOperandType::TENSOR_INT32).c_str());
arovir01b0717b52018-09-05 17:03:25 +0100998 }
999
1000 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001001 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001002 {
1003 return Fail("%s: failed to get activation input value", __func__);
1004 }
1005 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
1006 return true;
1007}
1008
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001009template<typename HalPolicy,
1010 typename HalOperation = typename HalPolicy::Operation,
1011 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001012bool GetInputActivationFunction(const HalOperation& operation,
1013 uint32_t inputIndex,
1014 ActivationFn& outActivationFunction,
1015 const HalModel& model,
1016 const ConversionData& data)
1017{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001018 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1019 inputIndex,
1020 HalPolicy::OperandType::INT32,
1021 outActivationFunction,
1022 model,
1023 data);
arovir01b0717b52018-09-05 17:03:25 +01001024}
1025
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001026template<typename HalPolicy,
1027 typename HalOperation = typename HalPolicy::Operation,
1028 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001029bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
1030 uint32_t inputIndex,
1031 ActivationFn& outActivationFunction,
1032 const HalModel& model,
1033 const ConversionData& data)
1034{
1035 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001036 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1037 inputIndex,
1038 HalPolicy::OperandType::INT32,
1039 outActivationFunction,
1040 model,
1041 data);
arovir01b0717b52018-09-05 17:03:25 +01001042}
1043
1044
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001045template<typename HalPolicy,
1046 typename HalOperation = typename HalPolicy::Operation,
1047 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001048bool GetOptionalInputActivation(const HalOperation& operation,
1049 uint32_t inputIndex,
1050 ActivationFn& activationFunction,
1051 const HalModel& model,
1052 const ConversionData& data)
1053{
1054 if (operation.inputs.size() <= inputIndex)
1055 {
1056 activationFunction = ActivationFn::kActivationNone;
1057 }
1058 else
1059 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001060 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001061 {
1062 return Fail("%s: Operation has invalid inputs", __func__);
1063 }
1064 }
1065 return true;
1066}
1067
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001068template<typename HalPolicy,
1069 typename ConvolutionDescriptor,
1070 typename HalOperation = typename HalPolicy::Operation,
1071 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001072bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1073 uint32_t dilationXIndex,
1074 ConvolutionDescriptor& descriptor,
1075 const HalModel& model,
1076 const ConversionData& data)
1077{
1078 bool success = true;
1079 if (operation.inputs.size() >= dilationXIndex + 2)
1080 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001081 success &= GetInputScalar<HalPolicy>(operation,
1082 dilationXIndex,
1083 HalPolicy::OperandType::INT32,
1084 descriptor.m_DilationX,
1085 model,
1086 data);
1087 success &= GetInputScalar<HalPolicy>(operation,
1088 dilationXIndex + 1,
1089 HalPolicy::OperandType::INT32,
1090 descriptor.m_DilationY,
1091 model,
1092 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001093 }
1094
1095 return success;
1096}
1097
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001098template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001099 typename HalOperation = typename HalPolicy::Operation,
1100 typename HalModel = typename HalPolicy::Model>
1101bool GetOptionalBool(const HalOperation& operation,
1102 uint32_t inputIndex,
1103 const HalModel& model,
1104 const ConversionData& data)
1105{
1106 using HalOperand = typename HalPolicy::Operand;
1107
1108 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1109 if (!operand)
1110 {
1111 return false;
1112 }
1113
1114 if (!IsBool(*operand))
1115 {
1116 return false;
1117 }
1118
1119 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1120 if (!valueAddress)
1121 {
1122 return false;
1123 }
1124
1125 if (*(static_cast<const bool*>(valueAddress)))
1126 {
1127 return true;
1128 }
1129 else
1130 {
1131 return false;
1132 }
1133}
1134
1135template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001136 typename HalOperand = typename HalPolicy::Operand,
1137 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001138bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001139 std::vector<int32_t>& outValues,
1140 const HalModel& model,
1141 const ConversionData& data)
1142{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001143 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001144 {
1145 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1146 }
1147
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001148 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001149 if (!startAddress)
1150 {
1151 return Fail("%s: failed to get operand address", __func__, operand.type);
1152 }
1153
1154 // Check number of bytes is sensible
1155 const uint32_t numBytes = operand.location.length;
1156 if (numBytes % sizeof(int32_t) != 0)
1157 {
1158 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1159 __func__, numBytes, sizeof(int32_t));
1160 }
1161
1162 outValues.resize(numBytes / sizeof(int32_t));
1163 memcpy(outValues.data(), startAddress, numBytes);
1164 return true;
1165}
1166
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001167template<typename HalPolicy,
1168 typename HalOperation = typename HalPolicy::Operation,
1169 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001170bool GetInputPaddingScheme(const HalOperation& operation,
1171 uint32_t inputIndex,
1172 PaddingScheme& outPaddingScheme,
1173 const HalModel& model,
1174 const ConversionData& data)
1175{
1176 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001177 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001178 {
1179 return Fail("%s: failed to get padding scheme input value", __func__);
1180 }
1181
1182 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1183 return true;
1184}
1185
/// Resolves the operand feeding input @p inputIndex of @p operation into a LayerInputHandle
/// that conversion code can connect to an ArmNN layer:
/// - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT operands are bound to the output slot
///   previously recorded in data.m_OutputSlotForOperand (which may be nullptr if the
///   producing layer failed to convert).
/// - CONSTANT_COPY / CONSTANT_REFERENCE operands are materialised as an ArmNN Constant layer.
/// @param dimensionMappings permutation applied when converting a constant operand's data.
/// @return an invalid (default-constructed) LayerInputHandle on any failure
///         (unsupported type, dynamic tensor, unsupported lifetime, backend rejection).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled by the catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        // This (pre-1.3) overload rejects dynamic tensors outright.
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin =
                    ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);

                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    // Materialise the constant as a layer and expose its single output slot.
                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1301
Kevin May42477c12020-03-26 13:34:14 +00001302
#ifdef ARMNN_ANDROID_NN_V1_3
/// NN HAL 1.3 overload of ConvertToLayerInputHandle. Differs from the generic version by
/// tolerating "type 1" dynamic input tensors whose shape was inferred by a previously
/// converted layer (looked up via data.m_OutputSlotForOperand), and by using the 1.3
/// SUBGRAPH_INPUT / SUBGRAPH_OUTPUT lifetimes.
/// @return an invalid (default-constructed) LayerInputHandle on any failure.
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            // BUGFIX: the bounds check was 'size() >= operandIndex', which permits
            // operandIndex == size() and indexes one past the end of the vector.
            if (operandIndex < data.m_OutputSlotForOperand.size() && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin =
                    ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);

                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    // Materialise the constant as a layer and expose its single output slot.
                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1433
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Binds output slot @p layerOutputIndex of @p layer to the HAL operand backing output
/// @p operationOutputIndex of @p operation, and records the slot in
/// data.m_OutputSlotForOperand so later operations can consume it.
///
/// @param overrideOutputInfo  When non-null, used instead of the operand's own TensorInfo.
/// @param validateFunc        Backend-support check, invoked for dynamic tensors after the
///                            output shape has been inferred from the connected inputs.
/// @param activationFunction  When not kActivationNone, a fused activation layer is appended
///                            via ProcessActivation and its output slot is tracked instead.
/// @param inferOutputShapes   Forces shape inference/validation even for static tensors.
/// @return true on success; false (or Fail) when the operand is missing, the slot index is
///         out of range, an input is unconnected, or validation rejects the inferred shape.
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference,
        // so every input slot must already be connected before we can infer anything.
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if(!isSupported)
        {
            // Validation failed: undo the wiring so the partially-built layer does not
            // remain attached to the graph. Connections were verified non-null above.
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        // Append the fused activation and track ITS output slot for the operand,
        // so downstream consumers read the activated values.
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
1515
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001516template<typename HalPolicy,
1517 typename HalOperation = typename HalPolicy::Operation,
1518 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001519armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1520 uint32_t inputIndex,
1521 const HalModel& model,
1522 ConversionData& data)
1523{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001524 using HalOperand = typename HalPolicy::Operand;
1525
1526 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001527 if (!operand)
1528 {
1529 return armnn::DataLayout::NHWC;
1530 }
1531
1532 if (!IsBool(*operand))
1533 {
1534 return armnn::DataLayout::NHWC;
1535 }
1536
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001537 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001538 if (!valueAddress)
1539 {
1540 return armnn::DataLayout::NHWC;
1541 }
1542
1543 if (*(static_cast<const bool*>(valueAddress)))
1544 {
1545 return armnn::DataLayout::NCHW;
1546 }
1547 else
1548 {
1549 return armnn::DataLayout::NHWC;
1550 }
1551}
1552
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001553template<typename HalPolicy,
1554 typename HalOperation = typename HalPolicy::Operation,
1555 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001556bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1557 uint32_t outputIndex,
1558 armnn::IConnectableLayer& layer,
1559 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001560 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001561 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001562 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1563 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001564{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001565 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1566 outputIndex,
1567 layer,
1568 outputIndex,
1569 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001570 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001571 overrideOutputInfo,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001572 validateFunc,
1573 activationFunction);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001574}
1575
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001576template<typename HalPolicy,
1577 typename HalOperation = typename HalPolicy::Operation,
1578 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001579bool ConvertToActivation(const HalOperation& operation,
1580 const char* operationName,
1581 const armnn::ActivationDescriptor& activationDesc,
1582 const HalModel& model,
1583 ConversionData& data)
1584{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001585 using HalOperand = typename HalPolicy::Operand;
1586
1587 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001588 if (!input.IsValid())
1589 {
1590 return Fail("%s: Input 0 is invalid", operationName);
1591 }
1592
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001593 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001594 if (!outputOperand)
1595 {
1596 return false;
1597 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001598
1599 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001600
1601 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001602
1603 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1604 {
1605 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1606 IsActivationSupported,
1607 data.m_Backends,
1608 isSupported,
1609 input.GetTensorInfo(),
1610 outInfo,
1611 activationDesc);
1612 };
1613
1614 if(IsDynamicTensor(outInfo))
1615 {
1616 isSupported = AreDynamicTensorsSupported();
1617 }
1618 else
1619 {
1620 validateFunc(outInfo, isSupported);
1621 }
1622
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001623 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001624 {
1625 return false;
1626 }
1627
1628 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01001629 if (!layer)
1630 {
1631 return Fail("%s: Could not add the ActivationLayer", __func__);
1632 }
arovir01b0717b52018-09-05 17:03:25 +01001633 input.Connect(layer->GetInputSlot(0));
1634
Finn Williamsa4983ce2020-07-23 12:55:12 +01001635 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001636}
1637
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001638template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001639 typename HalOperation = typename HalPolicy::Operation,
1640 typename HalModel = typename HalPolicy::Model>
1641bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1642{
1643 armnn::ActivationDescriptor desc;
1644 desc.m_Function = armnn::ActivationFunction::ReLu;
1645
1646 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1647}
1648
1649template<typename HalPolicy,
1650 typename HalOperation = typename HalPolicy::Operation,
1651 typename HalModel = typename HalPolicy::Model>
1652bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1653{
1654 armnn::ActivationDescriptor desc;
1655 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1656 desc.m_A = 1.0f;
1657 desc.m_B = -1.0f;
1658
1659 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1660}
1661
1662template<typename HalPolicy,
1663 typename HalOperation = typename HalPolicy::Operation,
1664 typename HalModel = typename HalPolicy::Model>
1665bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1666{
1667 armnn::ActivationDescriptor desc;
1668 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1669 desc.m_A = 6.0f;
1670
1671 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1672}
1673
1674template<typename HalPolicy,
1675 typename HalOperation = typename HalPolicy::Operation,
1676 typename HalModel = typename HalPolicy::Model>
1677bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1678{
1679 armnn::ActivationDescriptor desc;
1680 desc.m_Function = armnn::ActivationFunction::TanH;
1681 desc.m_A = 1.0f; // android nn does not support tanH parameters
1682 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1683
1684 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1685}
1686
1687template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001688 typename HalOperation = typename HalPolicy::Operation,
1689 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001690bool ConvertPaddings(const HalOperation& operation,
1691 const HalModel& model,
1692 ConversionData& data,
1693 unsigned int rank,
1694 armnn::PadDescriptor& padDescriptor)
1695{
1696 using HalOperand = typename HalPolicy::Operand;
1697
1698 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1699 if (!paddingsOperand)
1700 {
1701 return Fail("%s: Could not read paddings operand", __func__);
1702 }
1703
1704 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1705 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1706 {
1707 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1708 }
1709
1710 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001711 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1712 {
1713 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1714 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001715
1716 // add padding for each dimension of input tensor.
1717 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1718 {
1719 int paddingBeforeInput = paddings[i];
1720 int paddingAfterInput = paddings[i + 1];
1721
1722 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1723 {
1724 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1725 }
1726
1727 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1728 }
1729
1730 return true;
1731}
1732
1733template<typename HalPolicy,
1734 typename HalOperation = typename HalPolicy::Operation,
1735 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001736bool ConvertPooling2d(const HalOperation& operation,
1737 const char* operationName,
1738 armnn::PoolingAlgorithm poolType,
1739 const HalModel& model,
1740 ConversionData& data)
1741{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001742 using HalOperand = typename HalPolicy::Operand;
1743 using HalOperandType = typename HalPolicy::OperandType;
1744
1745 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001746 if (!input.IsValid())
1747 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001748 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001749 }
1750
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001751 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001752 if (!output)
1753 {
1754 return Fail("%s: Could not read output 0", __func__);
1755 }
1756
1757 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1758 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1759
arovir01b0717b52018-09-05 17:03:25 +01001760 armnn::Pooling2dDescriptor desc;
1761 desc.m_PoolType = poolType;
1762 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001763 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001764
1765 ActivationFn activation;
1766
Sadik Armagan15d63e22019-07-26 16:59:35 +01001767 auto inputSize = operation.inputs.size();
1768
1769 if (inputSize >= 10)
1770 {
1771 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1772 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1773 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1774 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1775 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1776 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1777 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1778 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1779 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1780 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1781 {
1782 return Fail("%s: Operation has invalid inputs", operationName);
1783 }
1784
Kevin May42477c12020-03-26 13:34:14 +00001785 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001786 {
1787 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1788 }
1789 }
1790 else
arovir01b0717b52018-09-05 17:03:25 +01001791 {
1792 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1793 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001794 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1795 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1796 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1797 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1798 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1799 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001800 {
1801 return Fail("%s: Operation has invalid inputs", operationName);
1802 }
1803
Kevin May42477c12020-03-26 13:34:14 +00001804 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001805 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001806 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001807 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001808
1809 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1810 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1811 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1812
1813 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1814 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001815 }
1816
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001817 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001818
1819 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1820 {
1821 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1822 IsPooling2dSupported,
1823 data.m_Backends,
1824 isSupported,
1825 inputInfo,
1826 outputInfo,
1827 desc);
1828
1829 };
1830
1831 if(IsDynamicTensor(outputInfo))
1832 {
1833 isSupported = AreDynamicTensorsSupported();
1834 }
1835 else
1836 {
1837 validateFunc(outputInfo, isSupported);
1838 }
1839
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001840 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001841 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001842 return false;
arovir01b0717b52018-09-05 17:03:25 +01001843 }
arovir01b0717b52018-09-05 17:03:25 +01001844
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001845 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1846 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001847 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001848 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001849 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001850
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001851 input.Connect(pooling2dLayer->GetInputSlot(0));
1852
Finn Williamsa4983ce2020-07-23 12:55:12 +01001853 if (!isSupported)
1854 {
1855 return false;
1856 }
1857
Kevin Mayfcf2a152020-09-08 16:06:32 +01001858 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1859 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001860}
1861
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001862template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001863 typename HalOperation = typename HalPolicy::Operation,
1864 typename HalModel = typename HalPolicy::Model>
1865bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001866{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001867 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001868
1869 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1870 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1871
1872 if (!input0.IsValid() || !input1.IsValid())
1873 {
1874 return Fail("%s: Operation has invalid inputs", __func__);
1875 }
1876
1877 // The FuseActivation parameter is always the input index 2
1878 // and it should be optional
1879 ActivationFn activationFunction;
1880 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1881 {
1882 return Fail("%s: Operation has invalid inputs", __func__);
1883 }
1884
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001885 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001886 if (!outputOperand)
1887 {
1888 return false;
1889 }
1890
1891 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1892 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1893
1894 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001895
1896 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001897 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1898 {
1899 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1900 IsAdditionSupported,
1901 data.m_Backends,
1902 isSupported,
1903 inputInfo0,
1904 inputInfo1,
1905 outputInfo);
1906 };
1907
1908 if(!IsDynamicTensor(outputInfo))
1909 {
1910 validateFunc(outputInfo, isSupported);
1911 }
1912 else
1913 {
1914 isSupported = AreDynamicTensorsSupported();
1915 }
1916
Mike Kelly46272802019-08-14 17:00:48 +01001917 if (!isSupported)
1918 {
1919 return false;
1920 }
1921
1922 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01001923
Kevin Mayfcf2a152020-09-08 16:06:32 +01001924 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1925 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01001926 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01001927 return false;
1928 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01001929
Kevin Mayfcf2a152020-09-08 16:06:32 +01001930 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
1931 data, nullptr, validateFunc, activationFunction);
1932
Mike Kelly46272802019-08-14 17:00:48 +01001933}
1934
1935template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001936 typename HalOperation = typename HalPolicy::Operation,
1937 typename HalModel = typename HalPolicy::Model>
1938bool ConvertArgMinMax(const HalOperation& operation,
1939 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001940 ConversionData& data,
1941 armnn::ArgMinMaxFunction argMinMaxFunction)
1942{
1943 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1944
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001945 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001946 using HalOperandType = typename HalPolicy::OperandType;
1947
1948 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1949
1950 if (!input0.IsValid())
1951 {
1952 return Fail("%s: Operation has invalid inputs", __func__);
1953 }
1954
1955 int32_t axis;
1956 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1957 {
1958 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1959 }
1960
1961 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1962 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1963
1964 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1965 {
1966 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1967 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1968 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1969 return Fail("%s: Axis must be in range [-n, n)", __func__);
1970 }
1971
1972 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1973 if (!output)
1974 {
1975 return Fail("%s: Could not read output 0", __func__);
1976 }
1977
1978 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1979
1980 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001981
1982 armnn::ArgMinMaxDescriptor descriptor;
1983 descriptor.m_Function = argMinMaxFunction;
1984 descriptor.m_Axis = axis;
1985
1986 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001987
1988 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1989 {
1990 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1991 IsArgMinMaxSupported,
1992 data.m_Backends,
1993 isSupported,
1994 inputInfo0,
1995 outputInfo,
1996 descriptor);
1997 };
1998
1999 if(IsDynamicTensor(outputInfo))
2000 {
2001 isSupported = AreDynamicTensorsSupported();
2002 }
2003 else
2004 {
2005 validateFunc(outputInfo, isSupported);
2006 }
2007
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00002008 if (!isSupported)
2009 {
2010 return false;
2011 }
2012
2013 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002014 if (!layer)
2015 {
2016 return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
2017 }
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00002018 input0.Connect(layer->GetInputSlot(0));
2019
Finn Williamsa4983ce2020-07-23 12:55:12 +01002020 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00002021}
2022
2023template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002024 typename HalOperation = typename HalPolicy::Operation,
2025 typename HalModel = typename HalPolicy::Model>
2026bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01002027{
Keith Davis6e4081f2020-09-03 13:17:21 +01002028 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01002029 using HalOperandType = typename HalPolicy::OperandType;
2030
2031 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
2032 if (operation.inputs.size() <= 1)
2033 {
2034 return Fail("%s: Operation has insufficient arguments", __func__);
2035 }
2036
2037 // Get inputs and outputs
2038 const std::size_t numInputTensors = operation.inputs.size() - 1;
2039
2040 int32_t concatDim;
2041 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
2042 {
2043 return Fail("%s: Operation has invalid inputs", __func__);
2044 }
2045
2046 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2047 if (!outputOperand)
2048 {
2049 return Fail("%s: Operation has no outputs", __func__);
2050 }
2051
Keith Davis6e4081f2020-09-03 13:17:21 +01002052 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2053 armnn::TensorShape outputShape = outputInfo.GetShape();
2054 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002055 //
2056 // handle negative concat dims along the lines of tensorflow as described here:
2057 // https://www.tensorflow.org/api_docs/python/tf/concat
2058 // "negative axis refers to axis + rank(values)-th dimension"
2059 //
2060 if (concatDim < 0)
2061 {
2062 concatDim += outputShape.GetNumDimensions();
2063 }
2064
2065 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2066 {
2067 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2068 }
2069
2070 std::vector<LayerInputHandle> inputHandles;
2071 std::vector<armnn::TensorShape> inputShapes;
2072
2073 inputHandles.reserve(numInputTensors);
2074 inputShapes.reserve(numInputTensors);
2075
Keith Davis6e4081f2020-09-03 13:17:21 +01002076 bool inputsHaveBeenReshaped = false;
2077 unsigned int tensorDimensionsAdded = 0;
Mike Kellyb8805202019-07-31 17:25:43 +01002078 for (uint32_t i = 0; i < numInputTensors; ++i)
2079 {
2080 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2081 if (!operand)
2082 {
2083 return Fail("%s: Operation has invalid inputs", __func__);
2084 }
2085
Teresa Charlin3b959602019-10-31 17:05:47 +00002086 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2087 if (!operandInputHandle.IsValid())
2088 {
2089 return Fail("%s: Operation has invalid inputs", __func__);
2090 }
Mike Kellyb8805202019-07-31 17:25:43 +01002091
Keith Davis6e4081f2020-09-03 13:17:21 +01002092 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002093 if (operandShape.GetNumDimensions() == 0)
2094 {
2095 return Fail("%s: Operands with rank 0 are not supported", __func__);
2096 }
2097
2098 if (RequiresReshape(operandShape))
2099 {
2100 inputsHaveBeenReshaped = true;
2101
2102 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2103
2104 // Expand the tensor to three dimensions
2105 if (operandShape.GetNumDimensions() == 2)
2106 {
2107 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2108 tensorDimensionsAdded = 1;
2109 }
2110 else
2111 {
2112 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2113 tensorDimensionsAdded = 2;
2114 }
2115
Kevin Mayaed08ac2019-12-12 16:33:31 +00002116 armnn::ReshapeDescriptor reshapeDescriptor;
2117 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2118
2119 bool isSupported = false;
2120 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2121 IsReshapeSupported,
2122 data.m_Backends,
2123 isSupported,
2124 operandInputHandle.GetTensorInfo(),
2125 reshapeInfo,
2126 reshapeDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002127
Kevin Mayaed08ac2019-12-12 16:33:31 +00002128 if (!isSupported)
2129 {
2130 return false;
2131 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002132 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002133
2134 // Point to the reshape operation rather then the input operation
Keith Davis6e4081f2020-09-03 13:17:21 +01002135 operandShape = reshapeInfo.GetShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002136 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2137 }
2138
2139 inputShapes.emplace_back(operandShape);
2140 inputHandles.emplace_back(operandInputHandle);
2141
2142 if (!inputHandles.back().IsValid())
2143 {
2144 return Fail("%s: Operation has invalid inputs", __func__);
2145 }
2146 }
2147
Mike Kellye2d611e2021-10-14 12:35:58 +01002148 if (inputShapes.size() != inputHandles.size())
2149 {
Mike Kelly1b46d132021-11-03 11:12:45 +00002150 return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
Mike Kellye2d611e2021-10-14 12:35:58 +01002151 inputShapes.size(), inputHandles.size());
2152 }
Mike Kellyb8805202019-07-31 17:25:43 +01002153
2154 if (inputsHaveBeenReshaped)
2155 {
2156 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2157 concatDim += tensorDimensionsAdded;
2158
2159 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2160 if (tensorDimensionsAdded == 1)
2161 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002162 if (IsDynamicTensor(outputInfo))
2163 {
2164 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2165 }
2166 else
2167 {
2168 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2169 }
Mike Kellyb8805202019-07-31 17:25:43 +01002170 }
2171 else if (tensorDimensionsAdded == 2)
2172 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002173 if (IsDynamicTensor(outputInfo))
2174 {
2175 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2176 }
2177 else
2178 {
2179 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2180 }
Mike Kellyb8805202019-07-31 17:25:43 +01002181 }
2182 }
2183
2184 // Check if permutations is required and get the pair of permutations required for the concatenation.
2185 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2186 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
Keith Davis6e4081f2020-09-03 13:17:21 +01002187 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
Keith Davis6e4081f2020-09-03 13:17:21 +01002188 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2189 concatDim,
2190 permutationPair);
Mike Kellyb8805202019-07-31 17:25:43 +01002191
Keith Davis6e4081f2020-09-03 13:17:21 +01002192 // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
2193 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002194 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002195 if (needPermute)
2196 {
2197 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2198 }
2199
2200 outputInfo.SetShape(outputShape);
Mike Kellyb8805202019-07-31 17:25:43 +01002201 }
Mike Kellyb8805202019-07-31 17:25:43 +01002202 // this is no-op for identity swizzles, otherwise it replaces both
2203 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002204 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002205 {
2206 return false;
2207 }
Mike Kellyb8805202019-07-31 17:25:43 +01002208
2209 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2210 armnn::OriginsDescriptor concatDescriptor;
2211
2212 try
2213 {
2214 // The concat descriptor is always created across the only supported concat dimension
2215 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002216 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2217 inputShapes.end(),
2218 concatDim);
2219 } catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002220 {
2221 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2222 }
2223
2224 // Validate the output shape is correct given the input shapes based on the
2225 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002226 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002227 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002228 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2229 {
2230 return Fail("%s: Error validating the output shape for concat", __func__);
2231 }
Mike Kellyb8805202019-07-31 17:25:43 +01002232 }
2233
2234 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2235 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
Keith Davis6e4081f2020-09-03 13:17:21 +01002236 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Mike Kellyb8805202019-07-31 17:25:43 +01002237
Keith Davis6e4081f2020-09-03 13:17:21 +01002238 bool isSupported = false;
2239 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
2240 FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
2241 outputInfo, concatDescriptor);
2242 };
2243
2244 if (!isDynamicTensor)
2245 {
2246 validateFunc(outputInfo, isSupported);
2247 }
2248 else
2249 {
2250 isSupported = AreDynamicTensorsSupported();
2251 }
2252
Mike Kellyb8805202019-07-31 17:25:43 +01002253 if (!isSupported)
2254 {
2255 return false;
2256 }
2257
2258 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002259 if (!layer)
2260 {
2261 return Fail("%s: Could not add the ConcatLayer", __func__);
2262 }
Mike Kellyb8805202019-07-31 17:25:43 +01002263 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002264 // Connect inputs to the layer
2265 const int numInputSlots = layer->GetNumInputSlots();
Mike Kellye2d611e2021-10-14 12:35:58 +01002266
2267 if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
2268 {
Mike Kelly1b46d132021-11-03 11:12:45 +00002269 return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
Mike Kellye2d611e2021-10-14 12:35:58 +01002270 static_cast<std::size_t>(numInputSlots), inputHandles.size());
2271 }
Mike Kellyb8805202019-07-31 17:25:43 +01002272 for (int i = 0; i < numInputSlots; ++i)
2273 {
2274 // connect the input directly to the merge (concat) layer
Mike Kelly1b46d132021-11-03 11:12:45 +00002275 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
Mike Kellyb8805202019-07-31 17:25:43 +01002276 }
2277
Keith Davis6e4081f2020-09-03 13:17:21 +01002278 // Transpose the output shape
2279 auto transposeOutputShape = [&](){
Mike Kelly4a956582020-02-28 10:32:09 +00002280 armnn::TransposeDescriptor transposeDesc;
2281 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002282 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2283 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2284 permutationPair.second);
Keith Davis6e4081f2020-09-03 13:17:21 +01002285 isSupported = false;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002286 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002287 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002288 data.m_Backends,
2289 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002290 inputTransposeInfo,
2291 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002292 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002293 if (!isSupported)
2294 {
2295 return false;
2296 }
Mike Kellyb8805202019-07-31 17:25:43 +01002297 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Keith Davis6e4081f2020-09-03 13:17:21 +01002298 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
Mike Kelly4a956582020-02-28 10:32:09 +00002299 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002300 layer = &deswizzleLayer;
Keith Davis6e4081f2020-09-03 13:17:21 +01002301
2302 return true;
2303 };
2304
2305 if (needPermute && !isDynamicTensor)
2306 {
2307 transposeOutputShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002308 }
2309
2310 if (inputsHaveBeenReshaped)
2311 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002312 if (isDynamicTensor)
2313 {
2314 // Infer the output shapes of concat if outputs are type 1 dynamic
Mike Kellye2d611e2021-10-14 12:35:58 +01002315 if (!layer->GetOutputSlot(0).IsTensorInfoSet())
2316 {
2317 return Fail("%s: TensorInfo is not set", __func__);
2318 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002319 if (!ValidateConcatOutputShape(inputShapes,
2320 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2321 concatDim))
2322 {
2323 return Fail("%s: Error validating the output shape for concat", __func__);
2324 }
2325 transposeOutputShape();
2326 }
2327
Mike Kellyb8805202019-07-31 17:25:43 +01002328 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Mike Kellyb8805202019-07-31 17:25:43 +01002329 // Undo the reshape knowing the amount of dimensions added
2330 if (tensorDimensionsAdded == 1)
2331 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002332 afterConcatInfo.SetShape(
2333 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002334 }
2335 else if (tensorDimensionsAdded == 2)
2336 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002337 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002338 }
2339
Kevin Mayaed08ac2019-12-12 16:33:31 +00002340 armnn::ReshapeDescriptor reshapeDescriptor;
2341 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
Keith Davis6e4081f2020-09-03 13:17:21 +01002342 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Kevin Mayaed08ac2019-12-12 16:33:31 +00002343
Keith Davis6e4081f2020-09-03 13:17:21 +01002344 isSupported = false;
2345 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2346 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2347 IsReshapeSupported,
2348 data.m_Backends,
2349 isSupported,
2350 concatInfo,
2351 afterConcatInfo,
2352 reshapeDescriptor);
2353 };
2354
2355 if (!IsDynamicTensor(afterConcatInfo))
2356 {
2357 validateReshapeFunc(afterConcatInfo, isSupported);
2358 }
2359 else
2360 {
2361 isSupported = AreDynamicTensorsSupported();
2362 }
2363
Kevin Mayaed08ac2019-12-12 16:33:31 +00002364 if (!isSupported)
2365 {
2366 return false;
2367 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002368 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
2369 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2370 0,
2371 *layer,
2372 model,
2373 data,
2374 nullptr,
2375 validateReshapeFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002376 }
2377
Keith Davis6e4081f2020-09-03 13:17:21 +01002378 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002379}
2380
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
/// Converts an ANEURALNETWORKS_CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Supports both HAL signatures:
///  - 11/10-input "explicit padding" form: pads (inputs 3-6), strides (7-8), activation (9)
///  - 7-input "implicit padding" form: padding scheme (3), strides (4-5), activation (6)
/// Weights (input 1) and bias (input 2) are wired as layer inputs rather than
/// constant tensors; the fused activation is appended by SetupAndTrackLayerOutputSlot.
///
/// @return true on success, false (via Fail) if operands are missing/invalid or the
///         backend rejects the layer.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Bias scale must equal inputScale * weightsScale for quantized conv; sanitize it.
    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo    = biasInput.GetTensorInfo();

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: left/right/top/bottom pads, then X/Y strides, then activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding: a padding scheme (SAME/VALID) plus X/Y strides and activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are [depth_out, filter_height, filter_width, depth_in] (NHWC),
        // so index 1 is kernel height and index 2 is kernel width.
        const uint32_t kernelX = weightsInfo.GetShape()[2];
        const uint32_t kernelY = weightsInfo.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(biasInfo);

    bool isSupported = false;
    // Deferred so that for dynamic outputs validation can run again once the
    // output shape has been inferred (passed through SetupAndTrackLayerOutputSlot).
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
2508
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002509template<typename HalPolicy,
2510 typename HalOperation = typename HalPolicy::Operation,
2511 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002512bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2513{
2514 using HalOperand = typename HalPolicy::Operand;
2515 using HalOperandType = typename HalPolicy::OperandType;
2516
2517 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2518 if (!input.IsValid() )
2519 {
2520 return Fail("%s: Operation has invalid inputs", __func__);
2521 }
2522
2523 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2524 unsigned int rank = inputInfo.GetNumDimensions();
2525 if (rank != 4)
2526 {
2527 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2528 }
2529
2530 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2531 if (!output)
2532 {
2533 return Fail("%s: Could not read output 0", __func__);
2534 }
2535
2536 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002537
2538 armnn::DepthToSpaceDescriptor descriptor;
2539
2540 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2541 if (descriptor.m_BlockSize <= 1)
2542 {
2543 return Fail("%s: Block size must be at least 1 in all dimensions");
2544 }
2545
2546 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002547 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002548 {
2549 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2550 }
2551
2552 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002553 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2554 {
2555 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2556 IsDepthToSpaceSupported,
2557 data.m_Backends,
2558 isSupported,
2559 inputInfo,
2560 outputInfo,
2561 descriptor);
2562 };
2563
2564 if(!IsDynamicTensor(outputInfo))
2565 {
2566 validateFunc(outputInfo, isSupported);
2567 }
2568 else
2569 {
2570 isSupported = AreDynamicTensorsSupported();
2571 }
2572
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002573 if (!isSupported)
2574 {
2575 return false;
2576 }
2577
2578 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002579 if (!layer)
2580 {
2581 return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
2582 }
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002583 input.Connect(layer->GetInputSlot(0));
2584
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002585 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002586}
2587
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an ANEURALNETWORKS_DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Supports both HAL signatures:
///  - 11-input "explicit padding" form: pads (inputs 3-6), strides (7-8), activation (10)
///    (input 9 is the depth multiplier, consumed elsewhere - TODO confirm against HalPolicy)
///  - 8-input "implicit padding" form: padding scheme (3), strides (4-5), activation (7)
/// Weights (input 1) and bias (input 2) are wired as layer inputs.
///
/// @return true on success, false (via Fail) if operands are missing/invalid or the
///         backend rejects the layer.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Inspect the raw weights operand first to validate its shape.
    // In AndroidNN the weights shape is [ 1, H, W, I * M ].
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }
    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Bias scale must equal inputScale * weightsScale for quantized conv; sanitize it.
    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding: left/right/top/bottom pads, then X/Y strides, then activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding: a padding scheme (SAME/VALID) plus X/Y strides and activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are [1, filter_height, filter_width, depth_out], so index 1 is
        // kernel height and index 2 is kernel width.
        const uint32_t kernelX = weightsInfo.GetShape()[2];
        const uint32_t kernelY = weightsInfo.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(biasInfo);

    bool isSupported = false;
    // Deferred so that for dynamic outputs validation can run again once the
    // output shape has been inferred.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }


    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
2739
Mike Kelly3c673942019-07-25 09:26:06 +01002740template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002741 typename HalOperation = typename HalPolicy::Operation,
2742 typename HalModel = typename HalPolicy::Model>
2743bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002744{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002745 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002746
2747 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2748 if (!input.IsValid())
2749 {
2750 return Fail("%s: Operation has invalid input", __func__);
2751 }
2752
Sadik Armagan98c0f662019-11-21 15:54:36 +00002753 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2754 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2755 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2756 {
2757 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2758 }
2759
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002760 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002761 if (!outputOperand)
2762 {
2763 return Fail("%s: Operation has invalid outputs", __func__);
2764 }
2765
2766 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002767
2768 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002769 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2770 {
2771 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2772 IsDequantizeSupported,
2773 data.m_Backends,
2774 isSupported,
2775 inputInfo,
2776 outputInfo);
2777 };
2778
2779 if(IsDynamicTensor(outputInfo))
2780 {
2781 isSupported = AreDynamicTensorsSupported();
2782 }
2783 else
2784 {
2785 validateFunc(outputInfo, isSupported);
2786 }
2787
Mike Kelly46272802019-08-14 17:00:48 +01002788 if (!isSupported)
2789 {
2790 return false;
2791 }
2792
2793 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
Mike Kellye2d611e2021-10-14 12:35:58 +01002794 if (!layer)
2795 {
2796 return Fail("%s: Could not add the DequantizeLayer", __func__);
2797 }
Mike Kelly46272802019-08-14 17:00:48 +01002798 input.Connect(layer->GetInputSlot(0));
2799
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002800 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002801}
2802
2803template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002804 typename HalOperation = typename HalPolicy::Operation,
2805 typename HalModel = typename HalPolicy::Model>
2806bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002807{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002808 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002809
2810 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2811 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2812
2813 if (!input0.IsValid() || !input1.IsValid())
2814 {
2815 return Fail("%s: Operation has invalid inputs", __func__);
2816 }
2817
2818 // The FuseActivation parameter is always the input index 2
2819 // and it should be optional
2820 ActivationFn activationFunction;
2821 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2822 {
2823 return Fail("%s: Operation has invalid inputs", __func__);
2824 }
2825
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002826 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002827 if (!output)
2828 {
2829 return Fail("%s: Could not read output 0", __func__);
2830 }
2831
2832 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002833
2834 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002835 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2836 {
2837 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2838 IsDivisionSupported,
2839 data.m_Backends,
2840 isSupported,
2841 input0.GetTensorInfo(),
2842 input1.GetTensorInfo(),
2843 outputInfo);
2844 };
2845
2846 if(!IsDynamicTensor(outputInfo))
2847 {
2848 validateFunc(outputInfo, isSupported);
2849 }
2850 else
2851 {
2852 isSupported = AreDynamicTensorsSupported();
2853 }
2854
Mike Kelly46272802019-08-14 17:00:48 +01002855 if (!isSupported)
2856 {
2857 return false;
2858 }
2859
2860 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01002861
Kevin Mayfcf2a152020-09-08 16:06:32 +01002862 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2863 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01002864 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01002865 return false;
Mike Kelly46272802019-08-14 17:00:48 +01002866 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01002867
2868 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2869 data, nullptr, validateFunc, activationFunction);
2870
Mike Kelly46272802019-08-14 17:00:48 +01002871}
2872
2873template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002874 typename HalOperation = typename HalPolicy::Operation,
2875 typename HalModel = typename HalPolicy::Model>
2876bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002877{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002878 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002879
2880 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2881 if (!input.IsValid())
2882 {
2883 return Fail("%s: Operation has invalid inputs", __func__);
2884 }
2885
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002886 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002887 if (!outputOperand)
2888 {
2889 return Fail("%s: Operation has invalid outputs", __func__);
2890 }
2891
2892 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002893
2894 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002895 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2896 {
2897 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2898 IsFloorSupported,
2899 data.m_Backends,
2900 isSupported,
2901 input.GetTensorInfo(),
2902 outputInfo);
2903 };
2904
2905 if(!IsDynamicTensor(outputInfo))
2906 {
2907 validateFunc(outputInfo, isSupported);
2908 }
2909 else
2910 {
2911 isSupported = AreDynamicTensorsSupported();
2912 }
2913
Mike Kelly46272802019-08-14 17:00:48 +01002914 if (!isSupported)
2915 {
2916 return false;
2917 }
2918
2919 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
Mike Kellye2d611e2021-10-14 12:35:58 +01002920 if (!layer)
2921 {
2922 return Fail("%s: Could not add the FloorLayer", __func__);
2923 }
Mike Kelly46272802019-08-14 17:00:48 +01002924 input.Connect(layer->GetInputSlot(0));
2925
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002926 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002927}
2928
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002929inline bool IsQSymm8(const V1_0::Operand&)
2930{
2931 return false;
2932}
2933
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Overload for HAL 1.2 operands: TENSOR_QUANT8_SYMM exists from HAL 1.2 onwards.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2942
#ifdef ARMNN_ANDROID_NN_V1_3

// Overload for HAL 1.3 operands, which use the V1_3 operand-type enumeration.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2951
// Outcome of DequantizeIfRequired() below.
enum class DequantizeStatus
{
    SUCCESS,          // Weights were dequantized; the returned buffer/size/info are valid.
    NOT_REQUIRED,     // Weights are already constant; no dequantization was needed.
    INVALID_OPERAND   // The weights operand could not be read; conversion should fail.
};

// (dequantized buffer, buffer size in bytes, tensor info, status) returned by DequantizeIfRequired().
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2960
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002961template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002962 typename HalOperation = typename HalPolicy::Operation,
2963 typename HalModel = typename HalPolicy::Model>
2964DequantizeResult DequantizeIfRequired(size_t operand_index,
2965 const HalOperation& operation,
2966 const HalModel& model,
2967 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002968{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002969 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002970
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002971 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002972 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002973 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002974 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002975 }
2976
2977 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2978 {
2979 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002980 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002981 }
2982
2983 const size_t weightsInputIndex = operation.inputs[operand_index];
2984
2985 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2986 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002987 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002988 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002989 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002990 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002991 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2992 {
2993 continue;
2994 }
2995
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002996 size_t outOpIndex = weightsInputIndex + 1;
2997 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002998 {
2999 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003000 }
3001
3002 if (outOpIndex != weightsInputIndex)
3003 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003004 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003005 }
3006
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003007 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Mike Kellye2d611e2021-10-14 12:35:58 +01003008
3009 if (!operand)
3010 {
3011 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3012 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003013
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003014 if (!IsQSymm8(*operand))
3015 {
3016 // Only supporting dequantize from QSYMM8 to FLOAT
3017 break;
3018 }
3019
3020 // Allocate a new buffer for the dequantized data and manually dequantize
3021 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
3022 if (!startValue)
3023 {
3024 // Failed to get the operand address
3025 break;
3026 }
3027
3028 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
3029 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003030 const float quantizationScale = operand->scale;
3031
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003032 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
3033 for (size_t i = 0; i < dequantizedBufferLength; ++i)
3034 {
3035 float* dstPtr = dequantizedBuffer.get();
Mike Kellye2d611e2021-10-14 12:35:58 +01003036
3037 if (!dstPtr)
3038 {
3039 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3040 }
Mike Kelly1b46d132021-11-03 11:12:45 +00003041 *dstPtr = quantizedBuffer[i] * quantizationScale;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003042 }
3043
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003044 // Construct tensor info for dequantized ConstTensor
3045 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
3046 operand->dimensions.data(),
3047 armnn::DataType::Float32);
3048
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003049 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
3050 std::move(tensorInfo),
3051 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003052 }
3053
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003054 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003055}
3056
3057template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003058 typename HalOperation = typename HalPolicy::Operation,
3059 typename HalModel = typename HalPolicy::Model>
3060ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
3061 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003062 const ConversionData& data,
3063 size_t operandIndex,
3064 bool optional = false)
3065{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003066 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
3067
3068 DequantizeStatus status = std::get<3>(dequantized);
3069 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003070 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003071 case DequantizeStatus::INVALID_OPERAND:
3072 {
3073 // return invalid const tensor pin
3074 return ConstTensorPin();
3075 }
3076 case DequantizeStatus::NOT_REQUIRED:
3077 {
3078 return ConvertOperationInputToConstTensorPin<HalPolicy>(
3079 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3080 }
3081 case DequantizeStatus::SUCCESS:
3082 default:
3083 {
3084 return ConstTensorPin(
3085 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3086 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003087 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003088}
3089
3090
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an NNAPI FULLY_CONNECTED operation (input, weights, bias, fused activation)
// into an ArmNN FullyConnected layer. Weights and bias are always wired as input slots
// 1 and 2 of the layer; an extra Reshape layer is inserted when the input has rank > 2.
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    // If weights are constant a separate constant layer will be created to store data.
    // Otherwise handle non const weights as inputs.
    weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    // If bias are constant a separate constant layer will be created to store data.
    // Otherwise handle non const bias as inputs.
    biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Flatten the input to 2D so it matches the weights' [num_units, input_size] layout.
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

    // Input 3 is the mandatory fused activation function code.
    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    bool isSupported = false;
    // Deferred backend-support check; also invoked later for dynamic output shapes.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);

    if (inputInfo.GetNumDimensions() > 2U)
    {
        // Higher-rank input: insert a Reshape layer to flatten it before the FC layer.
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        if (!reshapeLayer)
        {
            return Fail("%s: could not add the reshapeLayer", __func__);
        }
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
3236
3237template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003238 typename HalOperation = typename HalPolicy::Operation,
3239 typename HalModel = typename HalPolicy::Model>
3240bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003241{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003242 using HalOperand = typename HalPolicy::Operand;
3243
Mike Kelly999e2092019-08-15 10:46:46 +01003244 if (operation.inputs.size() != 1)
3245 {
3246 return Fail("%s: Optional inputs are not supported", __func__);
3247 }
3248
Mike Kelly46272802019-08-14 17:00:48 +01003249 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3250 if (!input.IsValid())
3251 {
3252 return Fail("%s: Operation has invalid inputs", __func__);
3253 }
3254
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003255 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003256 if (!output)
3257 {
3258 return Fail("%s: Could not read output 0", __func__);
3259 }
3260
3261 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3262 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3263
Mike Kelly46272802019-08-14 17:00:48 +01003264 if (outputInfo.GetNumDimensions() != 4u)
3265 {
3266 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3267 }
3268
3269 armnn::L2NormalizationDescriptor desc;
3270 desc.m_DataLayout = armnn::DataLayout::NHWC;
3271
3272 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003273 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3274 {
3275 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3276 IsL2NormalizationSupported,
3277 data.m_Backends,
3278 isSupported,
3279 inputInfo,
3280 outputInfo,
3281 desc);
3282 };
3283
3284 if(!IsDynamicTensor(outputInfo))
3285 {
3286 validateFunc(outputInfo, isSupported);
3287 }
3288 else
3289 {
3290 isSupported = AreDynamicTensorsSupported();
3291 }
3292
Mike Kelly46272802019-08-14 17:00:48 +01003293 if (!isSupported)
3294 {
3295 return false;
3296 }
3297
3298 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
Mike Kellye2d611e2021-10-14 12:35:58 +01003299 if (!layer)
3300 {
3301 return Fail("%s: Could not add the L2NormalizationLayer", __func__);
3302 }
Mike Kelly46272802019-08-14 17:00:48 +01003303 input.Connect(layer->GetInputSlot(0));
3304
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003305 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003306}
3307
3308template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003309 typename HalOperation = typename HalPolicy::Operation,
3310 typename HalModel = typename HalPolicy::Model>
3311bool ConvertLocalResponseNormalization(const HalOperation& operation,
3312 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003313 ConversionData& data)
3314{
Mike Kelly999e2092019-08-15 10:46:46 +01003315 if (operation.inputs.size() != 5)
3316 {
3317 return Fail("%s: Optional inputs are not supported", __func__);
3318 }
3319
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003320 using HalOperand = typename HalPolicy::Operand;
3321 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003322
3323 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3324 if (!input.IsValid())
3325 {
3326 return Fail("%s: Operation has invalid inputs", __func__);
3327 }
3328
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003329 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003330 if (!output)
3331 {
3332 return Fail("%s: Could not read output 0", __func__);
3333 }
3334
3335 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3336 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3337
Mike Kelly46272802019-08-14 17:00:48 +01003338 if (outputInfo.GetNumDimensions() != 4u)
3339 {
3340 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3341 }
3342
3343 armnn::NormalizationDescriptor descriptor;
3344 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3345 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3346 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3347
3348 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003349 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003350 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3351 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3352 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3353 {
3354 return Fail("%s: Operation has invalid inputs", __func__);
3355 }
3356
3357 // ArmNN expects normSize to be the full size of the normalization
3358 // window rather than the radius as in AndroidNN.
3359 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3360
3361 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003362 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3363 {
3364 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3365 IsNormalizationSupported,
3366 data.m_Backends,
3367 isSupported,
3368 inputInfo,
3369 outputInfo,
3370 descriptor);
3371 };
3372
3373 if(!IsDynamicTensor(outputInfo))
3374 {
3375 validateFunc(outputInfo, isSupported);
3376 }
3377 else
3378 {
3379 isSupported = AreDynamicTensorsSupported();
3380 }
3381
Mike Kelly46272802019-08-14 17:00:48 +01003382 if (!isSupported)
3383 {
3384 return false;
3385 }
3386
Mike Kelly46272802019-08-14 17:00:48 +01003387 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003388 if (!layer)
3389 {
3390 return Fail("%s: Could not add the NormalizationLayer", __func__);
3391 }
Mike Kelly46272802019-08-14 17:00:48 +01003392 input.Connect(layer->GetInputSlot(0));
3393
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003394 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003395}
3396
3397template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003398 typename HalOperation = typename HalPolicy::Operation,
3399 typename HalModel = typename HalPolicy::Model>
3400bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003401{
Mike Kelly46272802019-08-14 17:00:48 +01003402 armnn::ActivationDescriptor desc;
3403 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3404
3405 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3406}
3407
3408template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003409 typename HalOperation = typename HalPolicy::Operation,
3410 typename HalModel = typename HalPolicy::Model>
3411bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003412{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003413 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003414
3415 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3416 if (!input.IsValid())
3417 {
3418 return Fail("%s: Operation has invalid inputs", __func__);
3419 }
3420
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003421 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003422 if (!output)
3423 {
3424 return Fail("%s: Could not read output 0", __func__);
3425 }
3426
3427 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003428
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003429 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003430 if (!axisOperand)
3431 {
3432 return Fail("%s: Could not read input 1", __func__);
3433 }
3434
3435 std::vector<int32_t> axis;
3436 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3437 {
3438 return Fail("%s: Input 1 has invalid values", __func__);
3439 }
3440
3441 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3442
3443 // Convert the axis to unsigned int and remove duplicates.
3444 unsigned int rank = inputInfo.GetNumDimensions();
3445 std::set<unsigned int> uniqueAxis;
3446 std::transform(axis.begin(), axis.end(),
3447 std::inserter(uniqueAxis, uniqueAxis.begin()),
3448 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3449
3450 // Get the "keep dims" flag.
3451 int32_t keepDims = 0;
3452 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3453 {
3454 return Fail("%s: Could not read input 2", __func__);
3455 }
3456
3457 armnn::MeanDescriptor descriptor;
3458 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3459 descriptor.m_KeepDims = keepDims > 0;
3460
3461 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003462 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3463 {
3464 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3465 IsMeanSupported,
3466 data.m_Backends,
3467 isSupported,
3468 inputInfo,
3469 outputInfo,
3470 descriptor);
3471 };
3472
3473 if(!IsDynamicTensor(outputInfo))
3474 {
3475 validateFunc(outputInfo, isSupported);
3476 }
3477 else
3478 {
3479 isSupported = AreDynamicTensorsSupported();
3480 }
3481
Mike Kelly46272802019-08-14 17:00:48 +01003482 if (!isSupported)
3483 {
3484 return false;
3485 }
3486
3487 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003488 if (!layer)
3489 {
3490 return Fail("%s: Could not add the MeanLayer", __func__);
3491 }
Mike Kelly46272802019-08-14 17:00:48 +01003492 input.Connect(layer->GetInputSlot(0));
3493
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003494 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003495}
3496
3497template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003498 typename HalOperation = typename HalPolicy::Operation,
3499 typename HalModel = typename HalPolicy::Model>
3500bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003501{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003502 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003503
3504 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3505 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3506
3507 if (!input0.IsValid() || !input1.IsValid())
3508 {
3509 return Fail("%s: Operation has invalid inputs", __func__);
3510 }
3511
3512 // The FuseActivation parameter is always the input index 2
3513 // and it should be optional
3514 ActivationFn activationFunction;
3515 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3516 {
3517 return Fail("%s: Operation has invalid inputs", __func__);
3518 }
3519
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003520 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003521
3522 if (outputOperand == nullptr)
3523 {
3524 return false;
3525 }
3526
3527 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003528
3529 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003530 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3531 {
3532 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3533 IsMultiplicationSupported,
3534 data.m_Backends,
3535 isSupported,
3536 input0.GetTensorInfo(),
3537 input1.GetTensorInfo(),
3538 outputInfo);
3539 };
3540
3541 if(!IsDynamicTensor(outputInfo))
3542 {
3543 validateFunc(outputInfo, isSupported);
3544 }
3545 else
3546 {
3547 isSupported = AreDynamicTensorsSupported();
3548 }
3549
Mike Kelly46272802019-08-14 17:00:48 +01003550 if (!isSupported)
3551 {
3552 return false;
3553 }
3554
3555 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
Mike Kelly46272802019-08-14 17:00:48 +01003556
Kevin Mayfcf2a152020-09-08 16:06:32 +01003557 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3558 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01003559 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003560 return false;
3561 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01003562
Kevin Mayfcf2a152020-09-08 16:06:32 +01003563 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3564 data, nullptr, validateFunc, activationFunction);
Mike Kelly46272802019-08-14 17:00:48 +01003565}
3566
3567template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003568 typename HalOperation = typename HalPolicy::Operation,
3569 typename HalModel = typename HalPolicy::Model>
3570bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003571{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003572 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003573
Mike Kelly3c673942019-07-25 09:26:06 +01003574 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3575 if (!input.IsValid())
3576 {
3577 return Fail("%s: Operation has invalid inputs", __func__);
3578 }
3579
3580 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3581 unsigned int rank = inputInfo.GetNumDimensions();
3582
3583 armnn::PadDescriptor descriptor;
3584 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3585 {
3586 return Fail("%s: Could not convert paddings", __func__);
3587 }
3588
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003589 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3590 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003591 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3592 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3593 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003594 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003595 {
3596 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3597 }
3598
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003599 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003600 if (!output)
3601 {
3602 return Fail("%s: Could not read output", __func__);
3603 }
3604
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003605 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003606
3607 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003608 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3609 {
3610 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3611 IsPadSupported,
3612 data.m_Backends,
3613 isSupported,
3614 inputInfo,
3615 outputInfo,
3616 descriptor);
3617 };
3618
3619 if(!IsDynamicTensor(outputInfo))
3620 {
3621 validateFunc(outputInfo, isSupported);
3622 }
3623 else
3624 {
3625 isSupported = AreDynamicTensorsSupported();
3626 }
3627
Mike Kelly3c673942019-07-25 09:26:06 +01003628 if (!isSupported)
3629 {
3630 return false;
3631 }
3632
3633 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003634 if (!layer)
3635 {
3636 return Fail("%s: Could not add the PadLayer", __func__);
3637 }
Mike Kelly3c673942019-07-25 09:26:06 +01003638 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003639
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003640 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003641}
3642
Mike Kelly0a879362019-07-29 16:56:31 +01003643template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003644 typename HalOperation = typename HalPolicy::Operation,
3645 typename HalModel = typename HalPolicy::Model>
3646bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003647{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003648 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003649
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003650 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3651 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3652 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003653
3654 if (inputOperand == nullptr
3655 || requestedShapeOperand == nullptr
3656 || outputOperand == nullptr)
3657 {
3658 return Fail("%s: Operation has invalid inputs", __func__);
3659 }
3660
3661 if (requestedShapeOperand->dimensions.size() != 1)
3662 {
3663 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3664 __func__, requestedShapeOperand->dimensions.size());
3665 }
3666
3667 std::vector<int32_t> targetDimensions;
3668 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3669 {
3670 return Fail("%s: Could not read values of input 1", __func__);
3671 }
3672
3673 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3674
3675 Shape requestedShape;
3676 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3677 // function that resolves these values into a fully specified tensor shape.
3678 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3679 {
3680 return Fail("%s: Failed to resolve the requested shape", __func__);
3681 }
3682
Mike Kelly46272802019-08-14 17:00:48 +01003683 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3684 if (!input.IsValid())
3685 {
3686 return Fail("%s: Could not read input 0", __func__);
3687 }
3688
3689 armnn::ReshapeDescriptor reshapeDescriptor;
3690 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3691 requestedShape.dimensions.data());
3692
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003693 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3694
Mike Kelly46272802019-08-14 17:00:48 +01003695 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003696 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3697 {
3698 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3699 IsReshapeSupported,
3700 data.m_Backends,
3701 isSupported,
3702 input.GetTensorInfo(),
3703 outputInfo,
3704 reshapeDescriptor);
3705 };
3706
3707 if(!IsDynamicTensor(outputInfo))
3708 {
3709 validateFunc(outputInfo, isSupported);
3710 }
3711 else
3712 {
3713 isSupported = AreDynamicTensorsSupported();
3714 }
3715
Mike Kelly46272802019-08-14 17:00:48 +01003716 if (!isSupported)
3717 {
3718 return false;
3719 }
3720
3721 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003722 if (!layer)
3723 {
3724 return Fail("%s: Could not add the ReshapeLayer", __func__);
3725 }
Mike Kelly46272802019-08-14 17:00:48 +01003726 input.Connect(layer->GetInputSlot(0));
3727
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003728 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003729}
3730
3731template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003732 typename HalOperation = typename HalPolicy::Operation,
3733 typename HalModel = typename HalPolicy::Model>
3734bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003735{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003736 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003737
Mike Kelly0a879362019-07-29 16:56:31 +01003738 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3739 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3740
3741 if (!input0.IsValid() || !input1.IsValid())
3742 {
3743 return Fail("%s: Operation has invalid inputs", __func__);
3744 }
3745
3746 // The FuseActivation parameter is always the input index 2
3747 // and it should be optional
3748 ActivationFn activationFunction;
3749 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3750 {
3751 return Fail("%s: Operation has invalid inputs", __func__);
3752 }
3753
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003754 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003755 if (!output)
3756 {
3757 return Fail("%s: Could not read output 0", __func__);
3758 }
3759
3760 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003761
3762 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003763 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3764 {
3765 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3766 IsSubtractionSupported,
3767 data.m_Backends,
3768 isSupported,
3769 input0.GetTensorInfo(),
3770 input1.GetTensorInfo(),
3771 outputInfo);
3772 };
3773
3774 if(IsDynamicTensor(outputInfo))
3775 {
3776 isSupported = AreDynamicTensorsSupported();
3777 }
3778 else
3779 {
3780 validateFunc(outputInfo, isSupported);
3781 }
3782
Mike Kelly0a879362019-07-29 16:56:31 +01003783 if (!isSupported)
3784 {
3785 return false;
3786 }
3787
3788 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
Mike Kelly0a879362019-07-29 16:56:31 +01003789
Kevin Mayfcf2a152020-09-08 16:06:32 +01003790 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3791 if (!isReshapeSupported)
Mike Kelly0a879362019-07-29 16:56:31 +01003792 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003793 return false;
Mike Kelly0a879362019-07-29 16:56:31 +01003794 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003795 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3796 data, nullptr, validateFunc, activationFunction);
Mike Kelly0a879362019-07-29 16:56:31 +01003797}
3798
Finn Williams23b87b32019-07-30 11:44:05 +01003799template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003800 typename HalOperation = typename HalPolicy::Operation,
3801 typename HalModel = typename HalPolicy::Model>
3802bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003803{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003804 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003805
3806 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3807 if (!input.IsValid())
3808 {
3809 return Fail("%s: Operation has invalid inputs", __func__);
3810 }
3811
3812 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3813 unsigned int rank = inputInfo.GetNumDimensions();
3814 if (rank > 4)
3815 {
3816 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3817 }
3818
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003819 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003820 if (!output)
3821 {
3822 return Fail("%s: Could not read output 0", __func__);
3823 }
Sadik Armagan346e8112020-09-02 09:55:14 +01003824
3825 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
Mike Kelly46272802019-08-14 17:00:48 +01003826 {
3827 return Fail("%s: Dynamic output tensors are not supported", __func__);
3828 }
3829
3830 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3831 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003832 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003833
Mike Kelly46272802019-08-14 17:00:48 +01003834 std::vector<int32_t> axis;
3835 if (!axisOperand)
3836 {
Mike Kelly1b46d132021-11-03 11:12:45 +00003837 for (unsigned int i = 0; i < rank; ++i)
3838 {
3839 axis.push_back(static_cast<unsigned int>(i));
3840 }
Mike Kelly46272802019-08-14 17:00:48 +01003841 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003842 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003843 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003844 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003845 }
3846
3847 std::vector<uint32_t> outputDims;
3848 for (unsigned int i = 0; i < rank; i++)
3849 {
3850 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3851 auto currentDimension = inputInfo.GetShape()[i];
3852 if (skipSqueeze || currentDimension != 1)
3853 {
3854 outputDims.push_back(currentDimension);
3855 }
3856 }
3857
3858 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3859
3860 armnn::TensorInfo outputInfo = inputInfo;
3861 outputInfo.SetShape(outShape);
3862
3863 armnn::ReshapeDescriptor reshapeDesc;
3864 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3865
3866 bool isSupported = false;
3867 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3868 IsReshapeSupported,
3869 data.m_Backends,
3870 isSupported,
3871 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003872 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003873 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003874
Mike Kelly46272802019-08-14 17:00:48 +01003875 if (!isSupported)
3876 {
3877 return false;
3878 }
3879
3880 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01003881 if (!layer)
3882 {
3883 return Fail("%s: Could not add the ReshapeLayer", __func__);
3884 }
Mike Kelly46272802019-08-14 17:00:48 +01003885 input.Connect(layer->GetInputSlot(0));
3886
3887 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3888}
3889
3890template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003891 typename HalOperation = typename HalPolicy::Operation,
3892 typename HalModel = typename HalPolicy::Model>
3893bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003894{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003895 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003896
3897 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3898 if (!input.IsValid())
3899 {
3900 return Fail("%s: Operation has invalid inputs", __func__);
3901 }
3902
3903 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3904 unsigned int rank = inputInfo.GetNumDimensions();
3905 if (rank > 4)
3906 {
3907 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3908 }
3909
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003910 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003911 if (!output)
3912 {
3913 return Fail("%s: Could not read output 0", __func__);
3914 }
3915
3916 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003917
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003918 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3919 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3920 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003921
3922 std::vector<int32_t> beginValues;
3923 std::vector<int32_t> endValues;
3924 std::vector<int32_t> stridesValues;
3925
3926 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003927 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003928 {
3929 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3930 {
3931 return false;
3932 }
3933
3934 if (operandValues.size() != rank)
3935 {
3936 return false;
3937 }
3938
3939 return true;
3940 };
3941
3942 if (!ValidateInputOperands(*beginOperand, beginValues)
3943 || !ValidateInputOperands(*endOperand, endValues)
3944 || !ValidateInputOperands(*stridesOperand, stridesValues))
3945 {
3946 return Fail("%s: Operation has invalid input operand", __func__);
3947 }
3948
3949 // Stride cannot have value '0'
3950 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3951 {
3952 return Fail("%s: Stride must be non-zero value.", __func__);
3953 }
3954
3955 armnn::StridedSliceDescriptor descriptor;
3956 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3957 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3958 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3959 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3960
3961 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3962 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3963 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3964 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3965 {
3966 return Fail("%s: Operation has invalid inputs", __func__);
3967 }
3968
3969 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003970 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3971 {
3972 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3973 IsStridedSliceSupported,
3974 data.m_Backends,
3975 isSupported,
3976 inputInfo,
3977 outputInfo,
3978 descriptor);
3979 };
3980
3981 if(IsDynamicTensor(outputInfo))
3982 {
3983 isSupported = AreDynamicTensorsSupported();
3984 }
3985 else
3986 {
3987 validateFunc(outputInfo, isSupported);
3988 }
3989
Mike Kelly46272802019-08-14 17:00:48 +01003990 if (!isSupported)
3991 {
3992 return false;
3993 }
3994
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003995 // Check if slice can fit in a inferred output
3996 armnn::TensorShape inputShape = inputInfo.GetShape();
3997 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3998 {
3999 int stride = descriptor.m_Stride[i];
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01004000
4001 if (descriptor.m_ShrinkAxisMask & (1 << i))
4002 {
4003 // If the difference between the start point and the end point of the slice on an axis being shrunk
4004 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
4005 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
4006 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
4007 {
4008 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
4009 }
Ryan OShea00b586b2020-07-03 11:31:20 +01004010
4011 if(stride < 0)
4012 {
4013 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
4014 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01004015 }
4016 }
4017
Mike Kelly46272802019-08-14 17:00:48 +01004018 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01004019 if (!layer)
4020 {
4021 return Fail("%s: Could not add the StridedSliceLayer", __func__);
4022 }
Mike Kelly46272802019-08-14 17:00:48 +01004023 input.Connect(layer->GetInputSlot(0));
4024
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004025 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01004026}
4027
4028template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00004029 typename HalOperation = typename HalPolicy::Operation,
4030 typename HalModel = typename HalPolicy::Model>
4031bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01004032{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00004033 using HalOperand = typename HalPolicy::Operand;
Kevin May81f27fd2020-08-20 10:22:53 +01004034 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01004035
4036 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4037 if (!input.IsValid())
4038 {
4039 return Fail("%s: Operation has invalid inputs", __func__);
4040 }
4041
4042 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4043 unsigned int rank = inputInfo.GetNumDimensions();
4044 if (rank > 4)
4045 {
4046 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
4047 }
4048
4049 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
4050 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00004051 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01004052
4053 std::vector<int32_t> perm(rank);
Kevin May81f27fd2020-08-20 10:22:53 +01004054 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01004055 {
Mike Kelly46272802019-08-14 17:00:48 +01004056 for (unsigned int i = rank; i > 0; i--)
4057 {
Matthew Sloyan9b088d92020-09-14 15:12:55 +01004058 perm[rank - i] = armnn::numeric_cast<int> (i - 1);
Mike Kelly46272802019-08-14 17:00:48 +01004059 }
4060 }
Mike Kellyeec836e2020-02-18 10:03:30 +00004061 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01004062 {
Mike Kellyeec836e2020-02-18 10:03:30 +00004063 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01004064 }
4065
4066 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
4067
Mike Kelly4a956582020-02-28 10:32:09 +00004068 armnn::TransposeDescriptor transposeDesc;
4069 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01004070
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00004071 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01004072 if (!output)
4073 {
4074 return Fail("%s: Could not read output 0", __func__);
4075 }
4076
4077 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4078
4079 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004080 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4081 {
4082 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4083 IsTransposeSupported,
4084 data.m_Backends,
4085 isSupported,
4086 inputInfo,
4087 outputInfo,
4088 transposeDesc);
4089 };
4090
4091 if(IsDynamicTensor(outputInfo))
4092 {
4093 isSupported = AreDynamicTensorsSupported();
4094 }
4095 else
4096 {
4097 validateFunc(outputInfo, isSupported);
4098 }
4099
Mike Kelly46272802019-08-14 17:00:48 +01004100 if (!isSupported)
4101 {
4102 return false;
4103 }
4104
Mike Kelly4a956582020-02-28 10:32:09 +00004105 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01004106 if (!layer)
4107 {
4108 return Fail("%s: Could not add the TransposeLayer", __func__);
4109 }
Mike Kelly46272802019-08-14 17:00:48 +01004110 input.Connect(layer->GetInputSlot(0));
4111
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004112 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01004113}
4114
4115template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01004116 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01004117 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01004118 typename HalModel = typename HalPolicy::Model>
4119bool ConvertBatchToSpaceNd(const HalOperation& operation,
4120 const HalModel& model,
4121 ConversionData& data)
4122{
Finn Williams23b87b32019-07-30 11:44:05 +01004123
4124 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4125 if (!input.IsValid())
4126 {
4127 return Fail("%s: Operation has invalid inputs", __func__);
4128 }
4129
4130 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4131 if (!output)
4132 {
4133 return Fail("%s: Could not read output 0", __func__);
4134 }
4135
4136 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01004137
4138 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4139 if (!blockOperand)
4140 {
4141 return Fail("%s: Could not read input 1", __func__);
4142 }
4143
4144 // Convert the block operand to int32
4145 std::vector<int32_t> block;
4146 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
4147 {
4148 return Fail("%s: Input 1 has invalid values", __func__);
4149 }
4150
4151 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4152
4153 unsigned int rank = inputInfo.GetNumDimensions();
4154 if (rank != 4)
4155 {
4156 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
4157 }
4158
4159 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4160 {
4161 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4162 " greater than or equal to 1", __func__);
4163 }
4164
4165 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4166 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4167 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4168
Kevin May42477c12020-03-26 13:34:14 +00004169 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004170 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004171 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004172 }
4173 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4174 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4175
4176 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004177 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4178 {
4179 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4180 IsBatchToSpaceNdSupported,
4181 data.m_Backends,
4182 isSupported,
4183 inputInfo,
4184 outputInfo,
4185 batchToSpaceNdDesc);
4186 };
4187
4188 if(!IsDynamicTensor(outputInfo))
4189 {
4190 validateFunc(outputInfo, isSupported);
4191 }
4192 else
4193 {
4194 isSupported = AreDynamicTensorsSupported();
4195 }
4196
4197
Finn Williams23b87b32019-07-30 11:44:05 +01004198 if (!isSupported)
4199 {
4200 return false;
4201 }
4202
4203 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01004204 if (!layer)
4205 {
4206 return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
4207 }
Finn Williams23b87b32019-07-30 11:44:05 +01004208 input.Connect(layer->GetInputSlot(0));
4209
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004210 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004211}
Mike Kelly0a879362019-07-29 16:56:31 +01004212
Finn Williamsd74c5052019-07-30 17:06:00 +01004213template<typename HalPolicy,
4214 typename HalOperation = typename HalPolicy::Operation,
4215 typename HalOperand = typename HalPolicy::Operand,
4216 typename HalModel = typename HalPolicy::Model>
4217bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4218{
4219 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4220 if (!input.IsValid())
4221 {
4222 return Fail("%s: Operation has invalid inputs", __func__);
4223 }
4224
4225 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4226 unsigned int rank = inputInfo.GetNumDimensions();
4227 unsigned int spatialDim = rank - 2;
4228
4229 if (rank != 4)
4230 {
4231 Fail("%s: Only inputs with rank 4 are supported", __func__);
4232 }
4233
4234 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4235 if (!output)
4236 {
4237 return Fail("%s: Could not read output 0", __func__);
4238 }
4239
4240 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004241
4242 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4243 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4244
4245 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4246 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4247 {
4248 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4249 }
4250
4251 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004252 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4253 {
4254 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4255 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004256 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4257 {
4258 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4259 }
4260
4261 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4262 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4263 {
4264 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4265 }
4266
4267 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4268 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004269 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4270 {
4271 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4272 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004273 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4274 {
4275 int paddingBeforeInput = paddings[i];
4276 int paddingAfterInput = paddings[i + 1];
4277 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4278 {
4279 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4280 }
4281
Mike Kelly1b46d132021-11-03 11:12:45 +00004282 paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
4283 static_cast<unsigned int>(paddingAfterInput));
Finn Williamsd74c5052019-07-30 17:06:00 +01004284 }
4285
4286 armnn::SpaceToBatchNdDescriptor descriptor;
4287 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4288 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4289 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4290
Kevin May42477c12020-03-26 13:34:14 +00004291 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004292 {
4293 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4294 }
4295
4296 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004297 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4298 {
4299 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4300 IsSpaceToBatchNdSupported,
4301 data.m_Backends,
4302 isSupported,
4303 inputInfo,
4304 outputInfo,
4305 descriptor);
4306 };
4307
4308 if(IsDynamicTensor(outputInfo))
4309 {
4310 isSupported = AreDynamicTensorsSupported();
4311 }
4312 else
4313 {
4314 validateFunc(outputInfo, isSupported);
4315 }
4316
Finn Williamsd74c5052019-07-30 17:06:00 +01004317 if (!isSupported)
4318 {
4319 return false;
4320 }
4321
4322 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01004323 if (!layer)
4324 {
4325 return Fail("%s: Could not add the BatchToSpaceLayer", __func__);
4326 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004327 input.Connect(layer->GetInputSlot(0));
4328
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004329 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004330}
4331
saoste01b8471482018-10-10 09:44:51 +01004332} // namespace armnn_driver