//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId>       m_Backends;
    armnn::INetworkPtr                        m_Network;
    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    void SanitizeQuantizationScale(LayerInputHandle& weight,
                                   LayerInputHandle& input);

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
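
// Illustrative usage sketch (the operand name is an assumption for the example): because Fail()
// always returns false, conversion code can log and bail out in a single statement, e.g.
//     return Fail("%s: unsupported operand type %s", __func__, toString(operandType).c_str());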

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, supported, setBackend, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
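
// Illustrative usage sketch, mirroring the calls made later in this file (the tensor infos and
// descriptor names are assumptions for the example):
//
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                setBackend,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);
//     if (!isSupported)
//     {
//         return false;
//     }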

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (!reshapeLayer)
    {
        throw armnn::RuntimeException("ReshapeLayer is null");
    }

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
    reshapeLayer.SetBackendId(setBackend);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
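
// Worked example (illustrative): broadcasting a [4] tensor against a [2,3,4] tensor reshapes the
// smaller input to [1,1,4] by prepending degenerate dimensions, so both inputs present the same
// rank to the given layer.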

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
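
// Worked example (illustrative, assuming the NNAPI SAME padding convention): for input = 7,
// kernel = 3, stride = 2, the output size is ceil(7 / 2) = 4, so the total padding required is
// (4 - 1) * 2 + 3 - 7 = 2, split as outPadHead = 1 and outPadTail = 1.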

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // Bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
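
// Worked example (illustrative): with an input scale of 0.5 and a per-tensor weight scale of 0.25,
// the expected bias scale is 0.5 * 0.25 = 0.125. A bias scale within 1% of that value is snapped to
// 0.125; anything further out is left unchanged and will fail ArmNN's own validation later.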

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
    if (!layer)
    {
        throw armnn::RuntimeException("TransposeLayer is null");
    }
    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
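
// Illustrative sketch (assuming Transpose semantics where output dimension i takes input dimension
// mappings[i]): applying RotateTensorLeft ({ 1, 2, 0 }) maps an [A,B,C] input to a [B,C,A] output,
// e.g. (the network/input handle names are assumptions for the example):
//     armnn::IConnectableLayer& rotated = AddTransposeLayer(network, inputHandle, RotateTensorLeft);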

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
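
// Worked example (illustrative): concatenating inputs of shape [1,2,3] and [1,4,3] along
// concatDim = 1 validates only against an output shape of [1,6,3]; every non-concatenated
// dimension must match the first input exactly.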

bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping,
                   std::vector<armnn::BackendId>& setBackends)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            swizzleLayer.SetBackendId(setBackends[i]);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        std::vector<armnn::BackendId> setBackendsVec;
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
    return true;
}


bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
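
// Worked example (illustrative): for a 4-D concatenation along dimension 2, subtensors cannot be
// used directly, so dimensions 2 and 3 of each input are swapped (SwapDim2And3), the concatenation
// runs along dimension 3, and the same permutation restores the original layout afterwards.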

} // anonymous namespace

namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed-in AndroidNN activation function requires one.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function, or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
        return nullptr;
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
        return nullptr;
    }
    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
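
// Illustrative usage sketch (the input index and variable name are assumptions for the example):
// reading operation input 3 as an INT32 scalar.
//
//     int32_t strideX = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 3, HalPolicy::OperandType::INT32, strideX, model, data))
//     {
//         return Fail("%s: failed to read stride", __func__);
//     }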

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}


template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196template<typename HalPolicy,
1197 typename HalOperation = typename HalPolicy::Operation,
1198 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001199LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
1200 uint32_t inputIndex,
1201 const HalModel& model,
Keith Davis8f22bed2022-04-29 10:57:27 +01001202 ConversionData& data,
1203 const armnn::PermutationVector& dimensionMappings = g_DontPermute)
arovir01b0717b52018-09-05 17:03:25 +01001204{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001205 using HalOperand = typename HalPolicy::Operand;
Sadik Armagan44bcc022019-06-18 17:21:36 +01001206 using HalOperandType = typename HalPolicy::OperandType;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001207 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1208
1209 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +01001210 if (!operand)
1211 {
1212 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1213 return LayerInputHandle();
1214 }
1215
1216 if (!IsOperandTypeSupportedForTensors(operand->type))
1217 {
1218 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1219 return LayerInputHandle();
1220 }
1221
Sadik Armagan44bcc022019-06-18 17:21:36 +01001222 try
arovir01b0717b52018-09-05 17:03:25 +01001223 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001224 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001225 if (IsDynamicTensor(operandTensorInfo))
1226 {
1227 Fail("%s: dynamic input tensors are not supported", __func__);
1228 return LayerInputHandle();
1229 }
arovir01b0717b52018-09-05 17:03:25 +01001230
Sadik Armagan44bcc022019-06-18 17:21:36 +01001231 switch (operand->lifetime)
arovir01b0717b52018-09-05 17:03:25 +01001232 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001233 case HalOperandLifeTime::MODEL_INPUT:
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001234 {
1235 // NOTE: We must check whether we can support the input tensor on at least one
1236 // of the provided backends; otherwise we cannot convert the operation
1237 bool isInputSupported = false;
1238 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1239 IsInputSupported,
1240 data.m_Backends,
1241 isInputSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001242 armnn::BackendId(),
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001243 operandTensorInfo);
1244
1245 if (!isInputSupported)
1246 {
1247 Fail("%s: unsupported input tensor", __func__);
1248 return LayerInputHandle();
1249 }
1250
James Ward4e22f602020-10-20 15:50:33 +01001251 [[clang::fallthrough]]; // intentional fallthrough
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001252 }
1253 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001254 case HalOperandLifeTime::MODEL_OUTPUT:
arovir01b0717b52018-09-05 17:03:25 +01001255 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001256 // The tensor is either an operand internal to the model, or a model input.
1257 // It can be associated with an ArmNN output slot for an existing layer.
1258
1259 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1260 const uint32_t operandIndex = operation.inputs[inputIndex];
1261 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001262 }
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001263 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001264 case HalOperandLifeTime::CONSTANT_REFERENCE:
1265 {
1266 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
Keith Davis8f22bed2022-04-29 10:57:27 +01001267 ConstTensorPin tensorPin =
1268 ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
1269
Sadik Armagan44bcc022019-06-18 17:21:36 +01001270 if (tensorPin.IsValid())
arovir01b0717b52018-09-05 17:03:25 +01001271 {
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001272 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01001273 armnn::BackendId setBackend;
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001274 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1275 IsConstantSupported,
1276 data.m_Backends,
1277 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001278 setBackend,
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001279 tensorPin.GetConstTensor().GetInfo());
Mike Kelly28e3d9f2019-08-07 14:55:04 +01001280 if (!isSupported)
Sadik Armagan44bcc022019-06-18 17:21:36 +01001281 {
1282 return LayerInputHandle();
1283 }
1284
1285 armnn::IConnectableLayer* constantLayer =
1286 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
Cathal Corbett8de96f72022-09-01 13:34:59 +01001287 constantLayer->SetBackendId(setBackend);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001288 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
Matthew Sloyan56c249c2021-08-09 12:49:23 +01001289 armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1290 outputSlot.SetTensorInfo(constantTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001291
Matthew Sloyan56c249c2021-08-09 12:49:23 +01001292 return LayerInputHandle(true, &outputSlot, constantTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001293 }
1294 else
1295 {
1296 Fail("%s: invalid operand tensor", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001297 return LayerInputHandle();
1298 }
arovir01b0717b52018-09-05 17:03:25 +01001299 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001300 default:
arovir01b0717b52018-09-05 17:03:25 +01001301 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001302 // Unsupported lifetime for an input tensor
1303 Fail("%s: unsupported lifetime for input tensor: %s",
1304 __func__, toString(operand->lifetime).c_str());
arovir01b0717b52018-09-05 17:03:25 +01001305 return LayerInputHandle();
1306 }
arovir01b0717b52018-09-05 17:03:25 +01001307 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001308 }
1309 catch (UnsupportedOperand<HalOperandType>& e)
1310 {
1311 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1312 return LayerInputHandle();
arovir01b0717b52018-09-05 17:03:25 +01001313 }
1314}
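// Usage sketch (illustrative only): converters in this file resolve each
// tensor input through ConvertToLayerInputHandle and fail fast on an invalid
// handle before wiring up the layer, e.g.
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     input.Connect(layer->GetInputSlot(0));
//
// where layer is whichever armnn::IConnectableLayer the converter adds.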
1315
Kevin May42477c12020-03-26 13:34:14 +00001316
1317#ifdef ARMNN_ANDROID_NN_V1_3
1318template<typename HalPolicy>
1319LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1320 uint32_t inputIndex,
1321 const::android::hardware::neuralnetworks::V1_3::Model& model,
Keith Davis8f22bed2022-04-29 10:57:27 +01001322 ConversionData& data,
1323 const armnn::PermutationVector& dimensionMappings = g_DontPermute)
Kevin May42477c12020-03-26 13:34:14 +00001324{
1325 using HalOperand = typename HalPolicy::Operand;
1326 using HalOperandType = typename HalPolicy::OperandType;
1327 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1328
1329 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1330 if (!operand)
1331 {
1332 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1333 return LayerInputHandle();
1334 }
1335
1336 if (!IsOperandTypeSupportedForTensors(operand->type))
1337 {
1338 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1339 return LayerInputHandle();
1340 }
1341
1342 try
1343 {
1344 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Finn Williams9a044412020-08-17 19:08:35 +01001345
Kevin May42477c12020-03-26 13:34:14 +00001346 if (IsDynamicTensor(operandTensorInfo))
1347 {
Finn Williams291a16b2020-08-19 22:54:00 +01001348 data.m_DynamicInputsEncountered = true;
1349
Finn Williams9a044412020-08-17 19:08:35 +01001350 const uint32_t operandIndex = operation.inputs[inputIndex];
1351
1352 // Check if the dynamic input tensors have been inferred by one of the previous layers
1353 // If not we can't support them
Finn Williams291a16b2020-08-19 22:54:00 +01001354 if (operandIndex < data.m_OutputSlotForOperand.size() && data.m_OutputSlotForOperand[operandIndex])
Finn Williams9a044412020-08-17 19:08:35 +01001355 {
1356 operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1357 }
1358 else
1359 {
1360 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1361 return LayerInputHandle();
1362 }
Kevin May42477c12020-03-26 13:34:14 +00001363 }
1364
1365 switch (operand->lifetime)
1366 {
1367 case HalOperandLifeTime::SUBGRAPH_INPUT:
1368 {
1369 // NOTE: We must check whether we can support the input tensor on at least one
1370 // of the provided backends; otherwise we cannot convert the operation
1371 bool isInputSupported = false;
1372 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1373 IsInputSupported,
1374 data.m_Backends,
1375 isInputSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001376 armnn::BackendId(),
Kevin May42477c12020-03-26 13:34:14 +00001377 operandTensorInfo);
1378
1379 if (!isInputSupported)
1380 {
1381 Fail("%s: unsupported input tensor", __func__);
1382 return LayerInputHandle();
1383 }
1384
James Ward4e22f602020-10-20 15:50:33 +01001385 [[clang::fallthrough]]; // intentional fallthrough
Kevin May42477c12020-03-26 13:34:14 +00001386 }
1387 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1388 case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1389 {
1390 // The tensor is either an operand internal to the model, or a model input.
1391 // It can be associated with an ArmNN output slot for an existing layer.
1392
1393 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1394 const uint32_t operandIndex = operation.inputs[inputIndex];
1395 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1396 }
1397 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1398 case HalOperandLifeTime::CONSTANT_REFERENCE:
1399 {
1400 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
Keith Davis8f22bed2022-04-29 10:57:27 +01001401 ConstTensorPin tensorPin =
1402 ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);
1403
Kevin May42477c12020-03-26 13:34:14 +00001404 if (tensorPin.IsValid())
1405 {
1406 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01001407 armnn::BackendId setBackend;
Kevin May42477c12020-03-26 13:34:14 +00001408 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1409 IsConstantSupported,
1410 data.m_Backends,
1411 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001412 setBackend,
Kevin May42477c12020-03-26 13:34:14 +00001413 tensorPin.GetConstTensor().GetInfo());
1414 if (!isSupported)
1415 {
1416 return LayerInputHandle();
1417 }
1418
1419 armnn::IConnectableLayer* constantLayer =
1420 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
Cathal Corbett8de96f72022-09-01 13:34:59 +01001421 constantLayer->SetBackendId(setBackend);
Kevin May42477c12020-03-26 13:34:14 +00001422 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
Matthew Sloyan56c249c2021-08-09 12:49:23 +01001423 armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1424 outputSlot.SetTensorInfo(constantTensorInfo);
Kevin May42477c12020-03-26 13:34:14 +00001425
Matthew Sloyan56c249c2021-08-09 12:49:23 +01001426 return LayerInputHandle(true, &outputSlot, constantTensorInfo);
Kevin May42477c12020-03-26 13:34:14 +00001427 }
1428 else
1429 {
1430 Fail("%s: invalid operand tensor", __func__);
1431 return LayerInputHandle();
1432 }
1434 }
1435 default:
1436 {
1437 // Unsupported lifetime for an input tensor
1438 Fail("%s: unsupported lifetime for input tensor: %s",
1439 __func__, toString(operand->lifetime).c_str());
1440 return LayerInputHandle();
1441 }
1442 }
1443 }
1444 catch (UnsupportedOperand<HalOperandType>& e)
1445 {
1446 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1447 return LayerInputHandle();
1448 }
1449}
1450#endif
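// Terminology note (summary of the overloads above): a "Type 1" dynamic input
// has a shape an earlier layer has already inferred, exposed through
// data.m_OutputSlotForOperand; a "Type 2" dynamic input has no inferable
// shape until execution time and cannot be converted. Only the V1_3
// (Android R and later) overload accepts Type 1 inputs. Sketch, with an
// assumed operand index of 3 produced by an already-converted layer:
//
//     armnn::IOutputSlot* slot = data.m_OutputSlotForOperand[3];
//     if (slot) { operandTensorInfo = slot->GetTensorInfo(); } // Type 1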
1451
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001452template<typename HalPolicy,
1453 typename HalOperation = typename HalPolicy::Operation,
1454 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001455bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1456 uint32_t operationOutputIndex,
1457 armnn::IConnectableLayer& layer,
1458 uint32_t layerOutputIndex,
1459 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +01001460 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001461 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Sadik Armagandbda4b72020-09-03 11:33:07 +01001462 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001463 const ActivationFn& activationFunction = ActivationFn::kActivationNone,
Sadik Armagandbda4b72020-09-03 11:33:07 +01001464 bool inferOutputShapes = false)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001465{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001466 using HalOperand = typename HalPolicy::Operand;
1467
1468 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001469 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1470 {
1471 return false;
1472 }
1473
1474 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001475 if (overrideOutputInfo == nullptr)
1476 {
1477 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1478 }
1479 else
1480 {
1481 outputSlot.SetTensorInfo(*overrideOutputInfo);
1482 }
1483
Finn Williamsa4983ce2020-07-23 12:55:12 +01001484 bool isSupported = false;
Sadik Armagandbda4b72020-09-03 11:33:07 +01001485 if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
Sadik Armagan813f2302020-05-19 14:10:30 +01001486 {
Sadik Armagandbda4b72020-09-03 11:33:07 +01001487 // Type 1 dynamic tensors require the previous layer's output shape for inference
1488 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1489 {
Mike Kellye2d611e2021-10-14 12:35:58 +01001490 if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
Sadik Armagandbda4b72020-09-03 11:33:07 +01001491 {
1492 return false;
1493 }
1494 }
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001495 // IsTensorInfoSet will infer the dynamic output shape
Finn Williamsa4983ce2020-07-23 12:55:12 +01001496 outputSlot.IsTensorInfoSet();
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001497 // Once the shape is inferred we can validate it
Finn Williamsa4983ce2020-07-23 12:55:12 +01001498 validateFunc(outputSlot.GetTensorInfo(), isSupported);
1499
Sadik Armagandbda4b72020-09-03 11:33:07 +01001500 if(!isSupported)
1501 {
1502 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1503 {
1504 layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1505 }
1506 return false;
1507 }
Sadik Armagan813f2302020-05-19 14:10:30 +01001508 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01001509
Finn Williamsa4983ce2020-07-23 12:55:12 +01001510 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
Kevin Mayfcf2a152020-09-08 16:06:32 +01001511
1512 if (activationFunction != ActivationFn::kActivationNone)
1513 {
1514 const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
1515 armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
1516 &layer, data);
1517
1518 if (!endLayer)
1519 {
1520 return Fail("%s: ProcessActivation failed", __func__);
1521 }
1522
1523 armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
1524 data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
1525 }
1526 else
1527 {
1528 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1529 }
Finn Williamsa4983ce2020-07-23 12:55:12 +01001530
Mike Kellyb5fdf382019-06-11 16:35:25 +01001531 return true;
1532}
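// Usage sketch (illustrative only): converters pass a validateFunc so the
// backend support check can be re-run once a dynamic output shape has been
// inferred. A typical call, assuming a converter-local setBackend and a
// previously built descriptor:
//
//     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
//     {
//         FORWARD_LAYER_SUPPORT_FUNC(__func__, IsActivationSupported, data.m_Backends,
//                                    isSupported, setBackend, input.GetTensorInfo(),
//                                    outputInfo, activationDesc);
//     };
//     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data,
//                                                    nullptr, validateFunc);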
1533
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001534template<typename HalPolicy,
1535 typename HalOperation = typename HalPolicy::Operation,
1536 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001537armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1538 uint32_t inputIndex,
1539 const HalModel& model,
1540 ConversionData& data)
1541{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001542 using HalOperand = typename HalPolicy::Operand;
1543
1544 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001545 if (!operand)
1546 {
1547 return armnn::DataLayout::NHWC;
1548 }
1549
1550 if (!IsBool(*operand))
1551 {
1552 return armnn::DataLayout::NHWC;
1553 }
1554
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001555 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001556 if (!valueAddress)
1557 {
1558 return armnn::DataLayout::NHWC;
1559 }
1560
1561 if (*(static_cast<const bool*>(valueAddress)))
1562 {
1563 return armnn::DataLayout::NCHW;
1564 }
1565 else
1566 {
1567 return armnn::DataLayout::NHWC;
1568 }
1569}
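// Usage sketch: operations that carry an optional NCHW flag (for example
// input 10 of an API 1.2+ pooling operation) read it as
//
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
//
// A missing, non-boolean or false operand resolves to the NHWC default.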
1570
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001571template<typename HalPolicy,
1572 typename HalOperation = typename HalPolicy::Operation,
1573 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001574bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1575 uint32_t outputIndex,
1576 armnn::IConnectableLayer& layer,
1577 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001578 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001579 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001580 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1581 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001582{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001583 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1584 outputIndex,
1585 layer,
1586 outputIndex,
1587 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001588 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001589 overrideOutputInfo,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001590 validateFunc,
1591 activationFunction);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001592}
1593
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001594template<typename HalPolicy,
1595 typename HalOperation = typename HalPolicy::Operation,
1596 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001597bool ConvertToActivation(const HalOperation& operation,
1598 const char* operationName,
1599 const armnn::ActivationDescriptor& activationDesc,
1600 const HalModel& model,
1601 ConversionData& data)
1602{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001603 using HalOperand = typename HalPolicy::Operand;
1604
1605 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001606 if (!input.IsValid())
1607 {
1608 return Fail("%s: Input 0 is invalid", operationName);
1609 }
1610
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001611 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001612 if (!outputOperand)
1613 {
1614 return false;
1615 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001616
1617 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001618
1619 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01001620 armnn::BackendId setBackend;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001621 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1622 {
1623 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1624 IsActivationSupported,
1625 data.m_Backends,
1626 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001627 setBackend,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001628 input.GetTensorInfo(),
1629 outInfo,
1630 activationDesc);
1631 };
1632
1633 if(IsDynamicTensor(outInfo))
1634 {
1635 isSupported = AreDynamicTensorsSupported();
1636 }
1637 else
1638 {
1639 validateFunc(outInfo, isSupported);
1640 }
1641
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001642 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001643 {
1644 return false;
1645 }
1646
1647 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01001648 if (!layer)
1649 {
1650 return Fail("%s: Could not add the ActivationLayer", __func__);
1651 }
Cathal Corbett8de96f72022-09-01 13:34:59 +01001652 layer->SetBackendId(setBackend);
arovir01b0717b52018-09-05 17:03:25 +01001653 input.Connect(layer->GetInputSlot(0));
1654
Finn Williamsa4983ce2020-07-23 12:55:12 +01001655 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001656}
1657
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001658template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001659 typename HalOperation = typename HalPolicy::Operation,
1660 typename HalModel = typename HalPolicy::Model>
1661bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1662{
1663 armnn::ActivationDescriptor desc;
1664 desc.m_Function = armnn::ActivationFunction::ReLu;
1665
1666 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1667}
1668
1669template<typename HalPolicy,
1670 typename HalOperation = typename HalPolicy::Operation,
1671 typename HalModel = typename HalPolicy::Model>
1672bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1673{
1674 armnn::ActivationDescriptor desc;
1675 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1676 desc.m_A = 1.0f;
1677 desc.m_B = -1.0f;
1678
1679 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1680}
1681
1682template<typename HalPolicy,
1683 typename HalOperation = typename HalPolicy::Operation,
1684 typename HalModel = typename HalPolicy::Model>
1685bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1686{
1687 armnn::ActivationDescriptor desc;
1688 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1689 desc.m_A = 6.0f;
1690
1691 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1692}
1693
1694template<typename HalPolicy,
1695 typename HalOperation = typename HalPolicy::Operation,
1696 typename HalModel = typename HalPolicy::Model>
1697bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1698{
1699 armnn::ActivationDescriptor desc;
1700 desc.m_Function = armnn::ActivationFunction::TanH;
1701 desc.m_A = 1.0f; // Android NN does not support TanH parameters
1702 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1703
1704 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1705}
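// Any further activation wrapper would follow the same three-line pattern;
// a sketch for a hypothetical sigmoid conversion (not part of this file):
//
//     armnn::ActivationDescriptor desc;
//     desc.m_Function = armnn::ActivationFunction::Sigmoid;
//     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);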
1706
1707template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001708 typename HalOperation = typename HalPolicy::Operation,
1709 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001710bool ConvertPaddings(const HalOperation& operation,
1711 const HalModel& model,
1712 ConversionData& data,
1713 unsigned int rank,
1714 armnn::PadDescriptor& padDescriptor)
1715{
1716 using HalOperand = typename HalPolicy::Operand;
1717
1718 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1719 if (!paddingsOperand)
1720 {
1721 return Fail("%s: Could not read paddings operand", __func__);
1722 }
1723
1724 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1725 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1726 {
1727 return Fail("%s: Operation has invalid paddings operand: expected shape [%u, 2]", __func__, rank);
1728 }
1729
1730 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001731 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1732 {
1733 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1734 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001735
1736 // Add padding for each dimension of the input tensor.
1737 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1738 {
1739 int paddingBeforeInput = paddings[i];
1740 int paddingAfterInput = paddings[i + 1];
1741
1742 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1743 {
1744 return Fail("%s: Operation has invalid paddings operand, padding values must be non-negative.", __func__);
1745 }
1746
1747 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1748 }
1749
1750 return true;
1751}
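// Worked example (assumed values): for a rank-2 input whose paddings operand
// holds [1, 1, 2, 2], the loop above produces
// padDescriptor.m_PadList == { {1, 1}, {2, 2} }, i.e. one (before, after)
// pair per input dimension.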
1752
1753template<typename HalPolicy,
1754 typename HalOperation = typename HalPolicy::Operation,
1755 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001756bool ConvertPooling2d(const HalOperation& operation,
1757 const char* operationName,
1758 armnn::PoolingAlgorithm poolType,
1759 const HalModel& model,
1760 ConversionData& data)
1761{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001762 using HalOperand = typename HalPolicy::Operand;
1763 using HalOperandType = typename HalPolicy::OperandType;
1764
1765 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001766 if (!input.IsValid())
1767 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001768 return Fail("%s: Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001769 }
1770
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001771 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001772 if (!output)
1773 {
1774 return Fail("%s: Could not read output 0", __func__);
1775 }
1776
1777 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1778 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1779
arovir01b0717b52018-09-05 17:03:25 +01001780 armnn::Pooling2dDescriptor desc;
1781 desc.m_PoolType = poolType;
1782 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001783 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001784
1785 ActivationFn activation;
1786
Sadik Armagan15d63e22019-07-26 16:59:35 +01001787 auto inputSize = operation.inputs.size();
1788
1789 if (inputSize >= 10)
1790 {
1791 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1792 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1793 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1794 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1795 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1796 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1797 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1798 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1799 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1800 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1801 {
1802 return Fail("%s: Operation has invalid inputs", operationName);
1803 }
1804
Kevin May42477c12020-03-26 13:34:14 +00001805 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001806 {
1807 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1808 }
1809 }
1810 else
arovir01b0717b52018-09-05 17:03:25 +01001811 {
1812 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1813 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001814 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1815 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1816 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1817 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1818 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1819 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001820 {
1821 return Fail("%s: Operation has invalid inputs", operationName);
1822 }
1823
Kevin May42477c12020-03-26 13:34:14 +00001824 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001825 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001826 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001827 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001828
1829 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1830 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1831 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1832
1833 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1834 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001835 }
1836
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001837 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01001838 armnn::BackendId setBackend;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001839 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1840 {
1841 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1842 IsPooling2dSupported,
1843 data.m_Backends,
1844 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001845 setBackend,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001846 inputInfo,
1847 outputInfo,
1848 desc);
1849
1850 };
1851
1852 if(IsDynamicTensor(outputInfo))
1853 {
1854 isSupported = AreDynamicTensorsSupported();
1855 }
1856 else
1857 {
1858 validateFunc(outputInfo, isSupported);
1859 }
1860
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001861 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001862 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001863 return false;
arovir01b0717b52018-09-05 17:03:25 +01001864 }
arovir01b0717b52018-09-05 17:03:25 +01001865
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001866 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001867 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001868 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001869 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001870 }
Cathal Corbett8de96f72022-09-01 13:34:59 +01001871 pooling2dLayer->SetBackendId(setBackend);
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001872
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001873 input.Connect(pooling2dLayer->GetInputSlot(0));
1874
Kevin Mayfcf2a152020-09-08 16:06:32 +01001880 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1881 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001882}
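// Worked example (assumed values, implicit-padding path): for an NHWC input
// of width 224 with m_PoolWidth = 3 and m_StrideX = 2, the SAME scheme makes
// CalcPadding target ceil(224 / 2) = 112 output columns, so the total
// padding is (112 - 1) * 2 + 3 - 224 = 1, split as m_PadLeft = 0 and
// m_PadRight = 1; the VALID scheme leaves both at 0 and yields
// floor((224 - 3) / 2) + 1 = 111 columns.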
1883
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001884template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001885 typename HalOperation = typename HalPolicy::Operation,
1886 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001887bool ConvertArgMinMax(const HalOperation& operation,
1888 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001889 ConversionData& data,
1890 armnn::ArgMinMaxFunction argMinMaxFunction)
1891{
1892 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1893
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001894 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001895 using HalOperandType = typename HalPolicy::OperandType;
1896
1897 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1898
1899 if (!input0.IsValid())
1900 {
1901 return Fail("%s: Operation has invalid inputs", __func__);
1902 }
1903
1904 int32_t axis;
1905 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1906 {
1907 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1908 }
1909
1910 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1911 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1912
1913 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1914 {
1915 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1916 // E.g. a rank 4 tensor can have axis in range [-4, 4)
1917 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1918 return Fail("%s: Axis must be in range [-n, n)", __func__);
1919 }
1920
1921 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1922 if (!output)
1923 {
1924 return Fail("%s: Could not read output 0", __func__);
1925 }
1926
1927 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1928
1929 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001930
1931 armnn::ArgMinMaxDescriptor descriptor;
1932 descriptor.m_Function = argMinMaxFunction;
1933 descriptor.m_Axis = axis;
1934
1935 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01001936 armnn::BackendId setBackend;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001937 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1938 {
1939 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1940 IsArgMinMaxSupported,
1941 data.m_Backends,
1942 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01001943 setBackend,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001944 inputInfo0,
1945 outputInfo,
1946 descriptor);
1947 };
1948
1949 if(IsDynamicTensor(outputInfo))
1950 {
1951 isSupported = AreDynamicTensorsSupported();
1952 }
1953 else
1954 {
1955 validateFunc(outputInfo, isSupported);
1956 }
1957
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001958 if (!isSupported)
1959 {
1960 return false;
1961 }
1962
1963 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01001964 if (!layer)
1965 {
1966 return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
1967 }
Cathal Corbett8de96f72022-09-01 13:34:59 +01001968 layer->SetBackendId(setBackend);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001969 input0.Connect(layer->GetInputSlot(0));
1970
Finn Williamsa4983ce2020-07-23 12:55:12 +01001971 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001972}
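// Worked example (assumed values): for a rank-4 input the axis operand may
// take any value in [-4, 4); axis = -1 addresses the last dimension (the
// same dimension as axis = 3), while axis = 4 or axis = -5 fails the range
// check above before the descriptor is built.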
1973
1974template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001975 typename HalOperation = typename HalPolicy::Operation,
1976 typename HalModel = typename HalPolicy::Model>
1977bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001978{
Keith Davis6e4081f2020-09-03 13:17:21 +01001979 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001980 using HalOperandType = typename HalPolicy::OperandType;
1981
1982 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1983 if (operation.inputs.size() <= 1)
1984 {
1985 return Fail("%s: Operation has insufficient arguments", __func__);
1986 }
1987
1988 // Get inputs and outputs
1989 const std::size_t numInputTensors = operation.inputs.size() - 1;
1990
1991 int32_t concatDim;
1992 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1993 {
1994 return Fail("%s: Operation has invalid inputs", __func__);
1995 }
1996
1997 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1998 if (!outputOperand)
1999 {
2000 return Fail("%s: Operation has no outputs", __func__);
2001 }
2002
Keith Davis6e4081f2020-09-03 13:17:21 +01002003 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2004 armnn::TensorShape outputShape = outputInfo.GetShape();
2005 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002006 //
2007 // handle negative concat dims along the lines of tensorflow as described here:
2008 // https://www.tensorflow.org/api_docs/python/tf/concat
2009 // "negative axis refers to axis + rank(values)-th dimension"
2010 //
2011 if (concatDim < 0)
2012 {
2013 concatDim += outputShape.GetNumDimensions();
2014 }
2015
2016 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2017 {
2018 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2019 }
2020
2021 std::vector<LayerInputHandle> inputHandles;
2022 std::vector<armnn::TensorShape> inputShapes;
2023
2024 inputHandles.reserve(numInputTensors);
2025 inputShapes.reserve(numInputTensors);
2026
Keith Davis6e4081f2020-09-03 13:17:21 +01002027 bool inputsHaveBeenReshaped = false;
2028 unsigned int tensorDimensionsAdded = 0;
Mike Kellyb8805202019-07-31 17:25:43 +01002029 for (uint32_t i = 0; i < numInputTensors; ++i)
2030 {
2031 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2032 if (!operand)
2033 {
2034 return Fail("%s: Operation has invalid inputs", __func__);
2035 }
2036
Teresa Charlin3b959602019-10-31 17:05:47 +00002037 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2038 if (!operandInputHandle.IsValid())
2039 {
2040 return Fail("%s: Operation has invalid inputs", __func__);
2041 }
Mike Kellyb8805202019-07-31 17:25:43 +01002042
Keith Davis6e4081f2020-09-03 13:17:21 +01002043 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002044 if (operandShape.GetNumDimensions() == 0)
2045 {
2046 return Fail("%s: Operands with rank 0 are not supported", __func__);
2047 }
2048
2049 if (RequiresReshape(operandShape))
2050 {
2051 inputsHaveBeenReshaped = true;
2052
2053 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2054
2055 // Expand the tensor to three dimensions
2056 if (operandShape.GetNumDimensions() == 2)
2057 {
2058 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2059 tensorDimensionsAdded = 1;
2060 }
2061 else
2062 {
2063 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2064 tensorDimensionsAdded = 2;
2065 }
2066
Kevin Mayaed08ac2019-12-12 16:33:31 +00002067 armnn::ReshapeDescriptor reshapeDescriptor;
2068 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2069
2070 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002071 armnn::BackendId setBackendReshape;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002072 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2073 IsReshapeSupported,
2074 data.m_Backends,
2075 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002076 setBackendReshape,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002077 operandInputHandle.GetTensorInfo(),
2078 reshapeInfo,
2079 reshapeDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002080
Kevin Mayaed08ac2019-12-12 16:33:31 +00002081 if (!isSupported)
2082 {
2083 return false;
2084 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002085 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Cathal Corbett8de96f72022-09-01 13:34:59 +01002086 newReshape.SetBackendId(setBackendReshape);
Mike Kellyb8805202019-07-31 17:25:43 +01002087
2088 // Point to the reshape operation rather then the input operation
Keith Davis6e4081f2020-09-03 13:17:21 +01002089 operandShape = reshapeInfo.GetShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002090 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2091 }
2092
2093 inputShapes.emplace_back(operandShape);
2094 inputHandles.emplace_back(operandInputHandle);
2095
2096 if (!inputHandles.back().IsValid())
2097 {
2098 return Fail("%s: Operation has invalid inputs", __func__);
2099 }
2100 }
2101
Mike Kellye2d611e2021-10-14 12:35:58 +01002102 if (inputShapes.size() != inputHandles.size())
2103 {
Mike Kelly1b46d132021-11-03 11:12:45 +00002104 return Fail("%s: invalid model: input shapes count (%zu) does not match input handles count (%zu)", __func__,
Mike Kellye2d611e2021-10-14 12:35:58 +01002105 inputShapes.size(), inputHandles.size());
2106 }
Mike Kellyb8805202019-07-31 17:25:43 +01002107
2108 if (inputsHaveBeenReshaped)
2109 {
2110 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2111 concatDim += tensorDimensionsAdded;
2112
2113 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2114 if (tensorDimensionsAdded == 1)
2115 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002116 if (IsDynamicTensor(outputInfo))
2117 {
2118 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2119 }
2120 else
2121 {
2122 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2123 }
Mike Kellyb8805202019-07-31 17:25:43 +01002124 }
2125 else if (tensorDimensionsAdded == 2)
2126 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002127 if (IsDynamicTensor(outputInfo))
2128 {
2129 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2130 }
2131 else
2132 {
2133 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2134 }
Mike Kellyb8805202019-07-31 17:25:43 +01002135 }
2136 }
2137
2138 // Check if permutations is required and get the pair of permutations required for the concatenation.
2139 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2140 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
Keith Davis6e4081f2020-09-03 13:17:21 +01002141 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
Keith Davis6e4081f2020-09-03 13:17:21 +01002142 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2143 concatDim,
2144 permutationPair);
Mike Kellyb8805202019-07-31 17:25:43 +01002145
Keith Davis6e4081f2020-09-03 13:17:21 +01002146 // Only relevant for static tensors; dynamic output tensors are transposed later, once their shapes have been inferred from the inputs
2147 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002148 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002149 if (needPermute)
2150 {
2151 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2152 }
2153
2154 outputInfo.SetShape(outputShape);
Mike Kellyb8805202019-07-31 17:25:43 +01002155 }
Mike Kellyb8805202019-07-31 17:25:43 +01002156 // this is no-op for identity swizzles, otherwise it replaces both
2157 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002158 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002159 {
2160 return false;
2161 }
Mike Kellyb8805202019-07-31 17:25:43 +01002162
2163 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2164 armnn::OriginsDescriptor concatDescriptor;
2165
2166 try
2167 {
2168 // The concat descriptor is always created across the only supported concat dimension
2169 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002170 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2171 inputShapes.end(),
2172 concatDim);
2173 } catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002174 {
2175 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2176 }
2177
2178 // Validate the output shape is correct given the input shapes based on the
2179 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002180 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002181 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002182 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2183 {
2184 return Fail("%s: Error validating the output shape for concat", __func__);
2185 }
Mike Kellyb8805202019-07-31 17:25:43 +01002186 }
2187
2188 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2189 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
Keith Davis6e4081f2020-09-03 13:17:21 +01002190 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Mike Kellyb8805202019-07-31 17:25:43 +01002191
Keith Davis6e4081f2020-09-03 13:17:21 +01002192 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002193 armnn::BackendId setBackendConcat;
Keith Davis6e4081f2020-09-03 13:17:21 +01002194 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
Cathal Corbett8de96f72022-09-01 13:34:59 +01002195 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2196 IsConcatSupported,
2197 data.m_Backends,
2198 isSupported,
2199 setBackendConcat,
2200 inputTensorInfos,
2201 outputInfo,
2202 concatDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002203 };
2204
2205 if (!isDynamicTensor)
2206 {
2207 validateFunc(outputInfo, isSupported);
2208 }
2209 else
2210 {
2211 isSupported = AreDynamicTensorsSupported();
2212 }
2213
Mike Kellyb8805202019-07-31 17:25:43 +01002214 if (!isSupported)
2215 {
2216 return false;
2217 }
2218
2219 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002220 if (!layer)
2221 {
2222 return Fail("%s: Could not add the ConcatLayer", __func__);
2223 }
Cathal Corbett8de96f72022-09-01 13:34:59 +01002224 layer->SetBackendId(setBackendConcat);
Mike Kellyb8805202019-07-31 17:25:43 +01002225 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002226 // Connect inputs to the layer
2227 const int numInputSlots = layer->GetNumInputSlots();
Mike Kellye2d611e2021-10-14 12:35:58 +01002228
2229 if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
2230 {
Mike Kelly1b46d132021-11-03 11:12:45 +00002231 return Fail("%s: invalid model: input slot count (%zu) does not match input handle count (%zu)", __func__,
Mike Kellye2d611e2021-10-14 12:35:58 +01002232 static_cast<std::size_t>(numInputSlots), inputHandles.size());
2233 }
Mike Kellyb8805202019-07-31 17:25:43 +01002234 for (int i = 0; i < numInputSlots; ++i)
2235 {
2236 // connect the input directly to the merge (concat) layer
Mike Kelly1b46d132021-11-03 11:12:45 +00002237 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
Mike Kellyb8805202019-07-31 17:25:43 +01002238 }
2239
Keith Davis6e4081f2020-09-03 13:17:21 +01002240 // Transpose the output shape
2241 auto transposeOutputShape = [&](){
Mike Kelly4a956582020-02-28 10:32:09 +00002242 armnn::TransposeDescriptor transposeDesc;
2243 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002244 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2245 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2246 permutationPair.second);
Keith Davis6e4081f2020-09-03 13:17:21 +01002247 isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002248 armnn::BackendId setBackendTranspose;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002249 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002250 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002251 data.m_Backends,
2252 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002253 setBackendTranspose,
Teresa Charlin185f5882020-04-06 21:59:18 +01002254 inputTransposeInfo,
2255 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002256 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002257 if (!isSupported)
2258 {
2259 return false;
2260 }
Mike Kellyb8805202019-07-31 17:25:43 +01002261 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Keith Davis6e4081f2020-09-03 13:17:21 +01002262 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
Mike Kelly4a956582020-02-28 10:32:09 +00002263 permutationPair.second);
Cathal Corbett8de96f72022-09-01 13:34:59 +01002264 deswizzleLayer.SetBackendId(setBackendTranspose);
Mike Kellyb8805202019-07-31 17:25:43 +01002265 layer = &deswizzleLayer;
Keith Davis6e4081f2020-09-03 13:17:21 +01002266
2267 return true;
2268 };
2269
2270 if (needPermute && !isDynamicTensor)
2271 {
2272 transposeOutputShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002273 }
2274
2275 if (inputsHaveBeenReshaped)
2276 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002277 if (isDynamicTensor)
2278 {
2279 // Infer the output shapes of concat if outputs are type 1 dynamic
Mike Kellye2d611e2021-10-14 12:35:58 +01002280 if (!layer->GetOutputSlot(0).IsTensorInfoSet())
2281 {
2282 return Fail("%s: TensorInfo is not set", __func__);
2283 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002284 if (!ValidateConcatOutputShape(inputShapes,
2285 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2286 concatDim))
2287 {
2288 return Fail("%s: Error validating the output shape for concat", __func__);
2289 }
2290 transposeOutputShape();
2291 }
2292
Mike Kellyb8805202019-07-31 17:25:43 +01002293 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Mike Kellyb8805202019-07-31 17:25:43 +01002294 // Undo the reshape knowing the amount of dimensions added
2295 if (tensorDimensionsAdded == 1)
2296 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002297 afterConcatInfo.SetShape(
2298 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002299 }
2300 else if (tensorDimensionsAdded == 2)
2301 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002302 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002303 }
2304
Kevin Mayaed08ac2019-12-12 16:33:31 +00002305 armnn::ReshapeDescriptor reshapeDescriptor;
2306 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
Keith Davis6e4081f2020-09-03 13:17:21 +01002307 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Kevin Mayaed08ac2019-12-12 16:33:31 +00002308
Keith Davis6e4081f2020-09-03 13:17:21 +01002309 isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002310 armnn::BackendId setBackendReshape2;
Keith Davis6e4081f2020-09-03 13:17:21 +01002311 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2312 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2313 IsReshapeSupported,
2314 data.m_Backends,
2315 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002316 setBackendReshape2,
Keith Davis6e4081f2020-09-03 13:17:21 +01002317 concatInfo,
2318 afterConcatInfo,
2319 reshapeDescriptor);
2320 };
2321
2322 if (!IsDynamicTensor(afterConcatInfo))
2323 {
2324 validateReshapeFunc(afterConcatInfo, isSupported);
2325 }
2326 else
2327 {
2328 isSupported = AreDynamicTensorsSupported();
2329 }
2330
Kevin Mayaed08ac2019-12-12 16:33:31 +00002331 if (!isSupported)
2332 {
2333 return false;
2334 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002335 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
Cathal Corbett8de96f72022-09-01 13:34:59 +01002336 layer->SetBackendId(setBackendReshape2);
Keith Davis6e4081f2020-09-03 13:17:21 +01002337 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2338 0,
2339 *layer,
2340 model,
2341 data,
2342 nullptr,
2343 validateReshapeFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002344 }
2345
Keith Davis6e4081f2020-09-03 13:17:21 +01002346 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002347}
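// Worked example (assumed values): concatenating two [2, 3] inputs along
// axis 1 triggers RequiresReshape, so each input is expanded to [1, 2, 3]
// (tensorDimensionsAdded == 1) and concatDim becomes 2. The concat then
// produces [1, 2, 6], and the trailing reshape restores the expected [2, 6].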
2348
2349template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002350 typename HalOperation = typename HalPolicy::Operation,
2351 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002352bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2353{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002354 using HalOperand = typename HalPolicy::Operand;
2355 using HalOperandType = typename HalPolicy::OperandType;
2356
2357 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002358 if (!input.IsValid())
2359 {
2360 return Fail("%s: Operation has invalid inputs", __func__);
2361 }
2362
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002363 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002364 if (!output)
2365 {
2366 return Fail("%s: Could not read output 0", __func__);
2367 }
2368
2369 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002370 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002371
Keith Davis8f22bed2022-04-29 10:57:27 +01002372 LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2373 if (!weightsInput.IsValid())
Mike Kellyb5fdf382019-06-11 16:35:25 +01002374 {
2375 return Fail("%s: Operation has invalid inputs", __func__);
2376 }
2377
Keith Davis8f22bed2022-04-29 10:57:27 +01002378 LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2379 if (!biasInput.IsValid())
2380 {
2381 return Fail("%s: Operation has invalid inputs", __func__);
2382 }
2383
2384 biasInput.SanitizeQuantizationScale(weightsInput, input);
2385 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2386 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002387
2388 armnn::Convolution2dDescriptor desc;
2389 desc.m_DataLayout = armnn::DataLayout::NHWC;
2390 ActivationFn activation;
2391
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002392 if (operation.inputs.size() == 10)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002393 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002394 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2395 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2396 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2397 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2398 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2399 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002400 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002401 {
2402 return Fail("%s: Operation has invalid inputs", __func__);
2403 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01002404 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002405 else if (operation.inputs.size() == 7)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002406 {
2407 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002408 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2409 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2410 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002411 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002412 {
2413 return Fail("%s: Operation has invalid inputs", __func__);
2414 }
2415
Keith Davis8f22bed2022-04-29 10:57:27 +01002416 const uint32_t kernelX = weightsInfo.GetShape()[2];
2417 const uint32_t kernelY = weightsInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002418 const uint32_t inputX = inputInfo.GetShape()[2];
2419 const uint32_t inputY = inputInfo.GetShape()[1];
2420
2421 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2422 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002423 }
2424 else
2425 {
2426 return Fail("%s: Unsupported number of operation inputs", __func__);
2427 }
2428
2429 desc.m_BiasEnabled = true;
Keith Davis8f22bed2022-04-29 10:57:27 +01002430 armnn::Optional<armnn::TensorInfo> biases(biasInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002431
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002432 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002433 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002434 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2435 {
2436 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2437 IsConvolution2dSupported,
2438 data.m_Backends,
2439 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002440 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002441 inputInfo,
2442 outputInfo,
2443 desc,
Keith Davis8f22bed2022-04-29 10:57:27 +01002444 weightsInfo,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002445 biases);
2446 };
2447
2448 if(!IsDynamicTensor(outputInfo))
2449 {
2450 validateFunc(outputInfo, isSupported);
2451 }
2452 else
2453 {
2454 isSupported = AreDynamicTensorsSupported();
2455 }
2456
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002457 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002458 {
2459 return false;
2460 }
2461
Keith Davis8f22bed2022-04-29 10:57:27 +01002462 armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002463
2464 if (!startLayer)
2465 {
2466 return Fail("%s: AddConvolution2dLayer failed", __func__);
2467 }
Cathal Corbett8de96f72022-09-01 13:34:59 +01002468 startLayer->SetBackendId(setBackend);
2469
Mike Kellyb5fdf382019-06-11 16:35:25 +01002470 input.Connect(startLayer->GetInputSlot(0));
2471
Keith Davis8f22bed2022-04-29 10:57:27 +01002472 // Connect weights and bias inputs
2473 weightsInput.Connect(startLayer->GetInputSlot(1));
2474 biasInput.Connect(startLayer->GetInputSlot(2));
2475
Kevin Mayfcf2a152020-09-08 16:06:32 +01002476 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2477 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002478}
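// Worked example (assumed values, implicit-padding path): with inputX = 224,
// kernelX = 3 and m_StrideX = 1, the SAME scheme gives a total padding of
// (224 - 1) * 1 + 3 - 224 = 2, so CalcPadding sets m_PadLeft = m_PadRight = 1
// and the spatial width is preserved: (224 + 2 - 3) / 1 + 1 = 224.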
2479
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002480template<typename HalPolicy,
2481 typename HalOperation = typename HalPolicy::Operation,
2482 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002483bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2484{
2485 using HalOperand = typename HalPolicy::Operand;
2486 using HalOperandType = typename HalPolicy::OperandType;
2487
2488 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2489 if (!input.IsValid() )
2490 {
2491 return Fail("%s: Operation has invalid inputs", __func__);
2492 }
2493
2494 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2495 unsigned int rank = inputInfo.GetNumDimensions();
2496 if (rank != 4)
2497 {
2498 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2499 }
2500
2501 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2502 if (!output)
2503 {
2504 return Fail("%s: Could not read output 0", __func__);
2505 }
2506
2507 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002508
2509 armnn::DepthToSpaceDescriptor descriptor;
2510
2511 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2512 if (descriptor.m_BlockSize <= 1)
2513 {
Kevin May17de62e2023-07-31 12:16:04 +01002514 return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002515 }
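
    // Illustrative example: with a block size of 2, a DEPTH_TO_SPACE input of
    // shape [1, 2, 2, 4] (NHWC) is rearranged to [1, 4, 4, 1]; each group of
    // blockSize * blockSize channels becomes one spatial block.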

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }
    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }
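
    // Illustrative shape (hypothetical values): for input depth I = 16 and a
    // depth multiplier M = 2, a 3x3 depthwise filter arrives from AndroidNN as
    // [1, 3, 3, 32], i.e. [1, filter_height, filter_width, I * M].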

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weightsInfo.GetShape()[2];
        const uint32_t kernelY = weightsInfo.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

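        // Worked example of implicit SAME padding (illustrative values): with
        // inputX = 224, kernelX = 3 and strideX = 2, the output width is
        // ceil(224 / 2) = 112, so the total padding is
        // (112 - 1) * 2 + 3 - 224 = 1, split as m_PadLeft = 0 and m_PadRight = 1.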
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(biasInfo);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }
    startLayer->SetBackendId(setBackend);

    input.Connect(startLayer->GetInputSlot(0));

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    if (!layer)
    {
        return Fail("%s: Could not add the DequantizeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertElementwiseBinary(const HalOperation& operation,
                              const HalModel& model,
                              ConversionData& data,
                              armnn::BinaryOperation binaryOperation)
{
    using HalOperand = typename HalPolicy::Operand;

    ALOGV("HalPolicy::ConvertElementwiseBinary()");
    ALOGV("binaryOperation = %s", GetBinaryOperationAsCString(binaryOperation));

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2, and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid optional input: activation function", __func__);
    }
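
    // For example (per the NNAPI spec): ANEURALNETWORKS_ADD takes the inputs
    // {input0, input1, fuse_code}; a fuse_code of RELU clamps the elementwise
    // result to [0, +inf) by fusing an activation after the binary layer.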

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsElementwiseBinarySupported,
                                   data.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo,
                                   binaryOperation);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
    }
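
    // BroadcastTensor equalises the ranks of the two inputs before they are
    // connected; illustratively, adding a tensor of shape [2] to one of shape
    // [4, 1, 2] first reshapes the smaller input to [1, 1, 2] so the backend
    // can broadcast it (the exact reshape is up to the helper).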
    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFloorSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    if (!layer)
    {
        return Fail("%s: Could not add the FloorLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const HalOperation& operation,
                                      const HalModel& model,
                                      const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant<HalPolicy>(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
    // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
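    // Illustrative pattern being searched for (operand indices are hypothetical):
    //   operand 5 (TENSOR_QUANT8_SYMM, constant) -> DEQUANTIZE -> operand 7 (TENSOR_FLOAT32)
    // where operand 7 is consumed as the weights input of this operation. In that
    // case the constant QSYMM8 data can be dequantized here instead of at runtime.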
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to weightsInputIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);

        if (!operand)
        {
            return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
        }

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
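        // QSYMM8 has a zero point of 0, so dequantization reduces to
        // real = scale * quantized; e.g. a scale of 0.05 and a quantized value
        // of 40 give 0.05f * 40 = 2.0f.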
        const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();

        if (!dstPtr)
        {
            return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
        }

        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            // Write each dequantized value to its own slot rather than
            // repeatedly overwriting the first element.
            dstPtr[i] = quantizedBuffer[i] * quantizationScale;
        }

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    // If weights are constant a separate constant layer will be created to store data.
    // Otherwise handle non const weights as inputs.
    weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    // If the bias is constant a separate constant layer will be created to store data.
    // Otherwise handle the non const bias as an input.
    biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }
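
    // Illustrative example (hypothetical shapes): an input of [2, 3, 4] with
    // weights of [8, 12] flattens to [2, 12]; the trailing input dimensions
    // are collapsed to match the weights' input size of 12.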

    // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
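    // For quantized tensors the expected relationship is
    // biasScale == inputScale * weightsScale; e.g. (illustrative values)
    // inputScale = 0.5 and weightsScale = 0.25 imply a bias scale of 0.125.
    // Values within the tolerance are snapped to the product rather than rejected.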

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: AddFullyConnectedLayer failed", __func__);
    }
    startLayer->SetBackendId(setBackend);

    if (inputInfo.GetNumDimensions() > 2U)
    {
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        if (!reshapeLayer)
        {
            return Fail("%s: could not add the reshapeLayer", __func__);
        }
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsL2NormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    if (!layer)
    {
        return Fail("%s: Could not add the L2NormalizationLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
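    // Worked example: an AndroidNN radius of 2 becomes a window size of
    // 1 + (2 * 2) = 5, i.e. the centre element plus two neighbours on each side.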

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the NormalizationLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
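    // The modulo maps negative axes onto their positive equivalents; e.g. for
    // rank 4 an axis of -1 becomes (-1 + 4) % 4 = 3, and duplicates such as
    // {3, -1} collapse to the single entry {3} in the set.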

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the MeanLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
    // the scale and zeroPoint must be the same as input0.
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
    // pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }
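
    // Illustrative example: for a QAsymmU8 input with scale 0.5 and zero point
    // 128, the padded region is filled with 128, which dequantizes to
    // (128 - 128) * 0.5 = 0.0f, i.e. a logical zero.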

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the PadLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }
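
    // Illustrative example: for an input of shape [2, 3, 4] (24 elements), a
    // requested shape of [-1, 4] resolves to [6, 4]; the -1 is inferred as
    // 24 / 4 = 6 so that the element count is preserved.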

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ReshapeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }
3693
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003694 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003695 if (!output)
3696 {
3697 return Fail("%s: Could not read output 0", __func__);
3698 }
Sadik Armagan346e8112020-09-02 09:55:14 +01003699
3700 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
Mike Kelly46272802019-08-14 17:00:48 +01003701 {
3702 return Fail("%s: Dynamic output tensors are not supported", __func__);
3703 }
3704
3705 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3706 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003707 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003708
Mike Kelly46272802019-08-14 17:00:48 +01003709 std::vector<int32_t> axis;
3710 if (!axisOperand)
3711 {
Mike Kelly1b46d132021-11-03 11:12:45 +00003712 for (unsigned int i = 0; i < rank; ++i)
3713 {
3714 axis.push_back(static_cast<unsigned int>(i));
3715 }
Mike Kelly46272802019-08-14 17:00:48 +01003716 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003717 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003718 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003719 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003720 }
3721
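    // Build the squeezed shape: a dimension is dropped only when it appears in 'axis'
    // and its size is 1. For example, squeezing shape [1, 2, 2, 1] with the default
    // axes gives [2, 2], while an explicit axis list of {0} gives [2, 2, 1].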
    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               inputInfo,
                               outputInfo,
                               reshapeDesc);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the ReshapeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
    const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The beginOperand, endOperand and stridesOperand must each contain rank(input) values
    auto ValidateInputOperands = [&](const HalOperand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be a non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

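    // Example (illustrative only): for an input of shape [4] with begin = [1], end = [4]
    // and stride = [2], the slice selects elements 1 and 3, producing output shape [2].
    // Bits set in m_BeginMask/m_EndMask tell the backend to ignore the corresponding
    // begin/end values and use the full extent of that dimension instead.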
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check if the slice can fit in an inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then throw an error as the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the StridedSliceLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: The permutation tensor is an optional input to TRANSPOSE, therefore we do not want to
    // generate a failure if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
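    // If no permutation is supplied, default to reversing the dimension order,
    // e.g. [3, 2, 1, 0] for a rank 4 input (the loop below builds this vector).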
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the TransposeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }
    // Setting crops to 0,0 0,0 as it is not supported in the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
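
    // Example (illustrative only): an input of shape [4, 1, 1, 1] (NHWC) with block
    // shape [2, 2] is rearranged into an output of shape [1, 2, 2, 1].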
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand   = GetInputOperand<HalPolicy>(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
                                 static_cast<unsigned int>(paddingAfterInput));
    }
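
    // Example (illustrative only): an input of shape [1, 4, 4, 1] (NHWC) with block
    // shape [2, 2] and zero padding is rearranged into an output of shape [4, 2, 2, 1].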
    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the SpaceToBatchNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver