//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
        , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    void SanitizeQuantizationScale(LayerInputHandle& weight,
                                   LayerInputHandle& input);

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
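
// Illustrative call site (a sketch; the surrounding names are hypothetical):
//     if (operation.inputs.empty())
//     {
//         return Fail("%s: operation has no inputs", __func__);
//     }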

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, supported, setBackend, a, b, c)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
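
// Typical use (mirroring BroadcastTensor() later in this file): query IsReshapeSupported
// across the requested backends, capturing both the result and the backend that accepted
// the layer:
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                setBackend,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);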

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (!reshapeLayer)
    {
        throw armnn::RuntimeException("ReshapeLayer is null");
    }

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
    reshapeLayer.SetBackendId(setBackend);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
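
// Worked example (illustrative shapes): broadcasting input1 of shape [2, 3] against
// input0 of shape [1, 4, 2, 3] reshapes input1 to [1, 1, 2, 3] (leading degenerate
// dimensions) and connects the reshape output to the second input slot of startLayer.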

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
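
// Worked example, assuming the usual NNAPI SAME-padding rule
// (totalPadding = (ceil(input / stride) - 1) * stride + kernel - input):
// input = 7, kernel = 3, stride = 2 gives an output extent of 4, so
// totalPadding = 3 * 2 + 3 - 7 = 2 and outPadHead = outPadTail = 1;
// an odd remainder would go to the tail.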

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
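
// Worked example: with an input scale of 0.5f and per-axis weight scales { 0.2f, 0.4f },
// the bias scales become { 0.1f, 0.2f }. In the per-tensor branch, a weight scale of
// 0.25f gives expectedBiasScale = 0.125f, and a bias scale within the 1% tolerance is
// snapped to that value.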

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
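
// Illustrative effect on shapes (a sketch, using the transpose semantics of
// armnnUtils::TransposeTensorShape, where output dimension i takes input dimension
// mappings[i]): SwapDim2And3 maps [N, H, W, C] to [N, H, C, W]; RotateTensorLeft maps
// [A, B, C] to [B, C, A] and RotateTensorRight maps it back.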

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
    if (!layer)
    {
        throw armnn::RuntimeException("TransposeLayer is null");
    }
    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
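
// Example: concatenating shapes [2, 3, 4] and [2, 5, 4] along concatDim = 1 is only
// valid for an output shape of [2, 8, 4]; every non-concatenated dimension must match
// the first input exactly.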

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping,
                   std::vector<armnn::BackendId>& setBackends)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            swizzleLayer.SetBackendId(setBackends[i]);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        std::vector<armnn::BackendId> setBackendsVec;
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
    return true;
}


bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}

} // anonymous namespace

namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
        return nullptr;
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
        return nullptr;
    }
    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
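
// Illustrative use (the index and names are hypothetical): reading a stride operand
// supplied as an operation's fourth input:
//     int32_t strideX = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 3, HalPolicy::OperandType::INT32, strideX, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }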
965
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100966template<typename HalPolicy,
967 typename HalOperation = typename HalPolicy::Operation,
968 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100969bool GetInputInt32(const HalOperation& operation,
970 uint32_t inputIndex,
971 int32_t& outValue,
972 const HalModel& model,
973 const ConversionData& data)
974{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100975 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100976}
977
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100978template<typename HalPolicy,
979 typename HalOperation = typename HalPolicy::Operation,
980 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100981bool GetInputFloat32(const HalOperation& operation,
982 uint32_t inputIndex,
983 float& outValue,
984 const HalModel& model,
985 const ConversionData& data)
986{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100987 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100988}
989
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100990template<typename HalPolicy,
991 typename HalOperation = typename HalPolicy::Operation,
992 typename HalOperandType = typename HalPolicy::OperandType,
993 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100994bool GetInputActivationFunctionImpl(const HalOperation& operation,
995 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100996 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100997 ActivationFn& outActivationFunction,
998 const HalModel& model,
999 const ConversionData& data)
1000{
Mike Kellyb5fdf382019-06-11 16:35:25 +01001001 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001002 {
1003 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
1004 __func__,
1005 toString(type).c_str(),
Sadik Armagan188675f2021-02-12 17:16:42 +00001006 toString(HalOperandType::INT32).c_str(),
1007 toString(HalOperandType::TENSOR_INT32).c_str());
arovir01b0717b52018-09-05 17:03:25 +01001008 }
1009
1010 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001011 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001012 {
1013 return Fail("%s: failed to get activation input value", __func__);
1014 }
1015 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
1016 return true;
1017}
1018
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001019template<typename HalPolicy,
1020 typename HalOperation = typename HalPolicy::Operation,
1021 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001022bool GetInputActivationFunction(const HalOperation& operation,
1023 uint32_t inputIndex,
1024 ActivationFn& outActivationFunction,
1025 const HalModel& model,
1026 const ConversionData& data)
1027{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001028 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1029 inputIndex,
1030 HalPolicy::OperandType::INT32,
1031 outActivationFunction,
1032 model,
1033 data);
arovir01b0717b52018-09-05 17:03:25 +01001034}
1035
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001036template<typename HalPolicy,
1037 typename HalOperation = typename HalPolicy::Operation,
1038 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001039bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
1040 uint32_t inputIndex,
1041 ActivationFn& outActivationFunction,
1042 const HalModel& model,
1043 const ConversionData& data)
1044{
1045 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001046 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1047 inputIndex,
1048 HalPolicy::OperandType::INT32,
1049 outActivationFunction,
1050 model,
1051 data);
arovir01b0717b52018-09-05 17:03:25 +01001052}
1053
1054
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001055template<typename HalPolicy,
1056 typename HalOperation = typename HalPolicy::Operation,
1057 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001058bool GetOptionalInputActivation(const HalOperation& operation,
1059 uint32_t inputIndex,
1060 ActivationFn& activationFunction,
1061 const HalModel& model,
1062 const ConversionData& data)
1063{
1064 if (operation.inputs.size() <= inputIndex)
1065 {
1066 activationFunction = ActivationFn::kActivationNone;
1067 }
1068 else
1069 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001070 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001071 {
1072 return Fail("%s: Operation has invalid inputs", __func__);
1073 }
1074 }
1075 return true;
1076}
1077
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001078template<typename HalPolicy,
1079 typename ConvolutionDescriptor,
1080 typename HalOperation = typename HalPolicy::Operation,
1081 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001082bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1083 uint32_t dilationXIndex,
1084 ConvolutionDescriptor& descriptor,
1085 const HalModel& model,
1086 const ConversionData& data)
1087{
1088 bool success = true;
1089 if (operation.inputs.size() >= dilationXIndex + 2)
1090 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001091 success &= GetInputScalar<HalPolicy>(operation,
1092 dilationXIndex,
1093 HalPolicy::OperandType::INT32,
1094 descriptor.m_DilationX,
1095 model,
1096 data);
1097 success &= GetInputScalar<HalPolicy>(operation,
1098 dilationXIndex + 1,
1099 HalPolicy::OperandType::INT32,
1100 descriptor.m_DilationY,
1101 model,
1102 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001103 }
1104
1105 return success;
1106}
1107
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001108template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001109 typename HalOperation = typename HalPolicy::Operation,
1110 typename HalModel = typename HalPolicy::Model>
1111bool GetOptionalBool(const HalOperation& operation,
1112 uint32_t inputIndex,
1113 const HalModel& model,
1114 const ConversionData& data)
1115{
1116 using HalOperand = typename HalPolicy::Operand;
1117
1118 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1119 if (!operand)
1120 {
1121 return false;
1122 }
1123
1124 if (!IsBool(*operand))
1125 {
1126 return false;
1127 }
1128
1129 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1130 if (!valueAddress)
1131 {
1132 return false;
1133 }
1134
1135 if (*(static_cast<const bool*>(valueAddress)))
1136 {
1137 return true;
1138 }
1139 else
1140 {
1141 return false;
1142 }
1143}
1144
1145template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001146 typename HalOperand = typename HalPolicy::Operand,
1147 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001148bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001149 std::vector<int32_t>& outValues,
1150 const HalModel& model,
1151 const ConversionData& data)
1152{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001153 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001154 {
1155 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1156 }
1157
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001158 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001159 if (!startAddress)
1160 {
1161 return Fail("%s: failed to get operand address", __func__, operand.type);
1162 }
1163
1164 // Check number of bytes is sensible
1165 const uint32_t numBytes = operand.location.length;
1166 if (numBytes % sizeof(int32_t) != 0)
1167 {
1168 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1169 __func__, numBytes, sizeof(int32_t));
1170 }
1171
1172 outValues.resize(numBytes / sizeof(int32_t));
1173 memcpy(outValues.data(), startAddress, numBytes);
1174 return true;
1175}
1176
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001177template<typename HalPolicy,
1178 typename HalOperation = typename HalPolicy::Operation,
1179 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001180bool GetInputPaddingScheme(const HalOperation& operation,
1181 uint32_t inputIndex,
1182 PaddingScheme& outPaddingScheme,
1183 const HalModel& model,
1184 const ConversionData& data)
1185{
1186 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001187 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001188 {
1189 return Fail("%s: failed to get padding scheme input value", __func__);
1190 }
1191
1192 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1193 return true;
1194}
1195
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196template<typename HalPolicy,
1197 typename HalOperation = typename HalPolicy::Operation,
1198 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001199LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
1200 uint32_t inputIndex,
1201 const HalModel& model,
Keith Davis8f22bed2022-04-29 10:57:27 +01001202 ConversionData& data,
1203 const armnn::PermutationVector& dimensionMappings = g_DontPermute)
arovir01b0717b52018-09-05 17:03:25 +01001204{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001205 using HalOperand = typename HalPolicy::Operand;
Sadik Armagan44bcc022019-06-18 17:21:36 +01001206 using HalOperandType = typename HalPolicy::OperandType;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001207 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1208
1209 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +01001210 if (!operand)
1211 {
1212 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1213 return LayerInputHandle();
1214 }
1215
1216 if (!IsOperandTypeSupportedForTensors(operand->type))
1217 {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin =
                    ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);

                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
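
// Usage sketch (illustrative only): a typical operation converter resolves each of its inputs
// through ConvertToLayerInputHandle and bails out early on failure, e.g.:
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//
// The returned handle wraps the ArmNN output slot that produces the operand, whether that is a
// model input, an intermediate layer output, or a freshly added Constant layer.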

#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const ::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check whether the dynamic input tensor's shape has been inferred by one of the
            // previous layers; if not, we cannot support it
            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin =
                    ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data, dimensionMappings);

                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
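
// Note on the HAL 1.3 overload above: a "Type 1" dynamic input is one whose shape has already
// been inferred by a previously converted layer, so its TensorInfo can be recovered from
// data.m_OutputSlotForOperand; a "Type 2" dynamic input has no such producer and is rejected.
// The distinction reduces to a check of this form (illustrative only):
//
//     const uint32_t operandIndex = operation.inputs[inputIndex];
//     const bool isType1Dynamic = operandIndex < data.m_OutputSlotForOperand.size() &&
//                                 data.m_OutputSlotForOperand[operandIndex] != nullptr;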

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
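
// Usage sketch (illustrative only): converters normally finish by handing their layer to this
// helper, optionally folding in a fused activation, e.g.:
//
//     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
//                                                    data, nullptr, validateFunc, activation);
//
// For dynamic outputs the validateFunc lambda is re-run here against the inferred shape, so the
// same lambda used for the static-shape support check can simply be passed through.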

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}
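
// Usage sketch (illustrative only): operations such as CONV_2D carry an optional trailing BOOL
// operand that selects NCHW; a missing or false operand leaves the NHWC default in place:
//
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
//
// The index 10 here is an example; it is whatever position the optional layout operand
// occupies in the operation being converted.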

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the ActivationLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A        = 1.0f;
    desc.m_B        = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A        = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}
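
// A minimal sketch (not part of the driver) of how a further activation could be wired up,
// assuming the same HalPolicy machinery as the converters above; the name ConvertSigmoidExample
// is hypothetical and is shown purely to illustrate the descriptor-plus-delegate pattern.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertSigmoidExample(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid; // logistic; no m_A/m_B parameters needed

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}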

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // Add padding for each dimension of the input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}
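
// Example (illustrative only): for a rank-3 input, the paddings operand is a [3, 2] INT32
// tensor read row by row as {before, after} pairs, so
//
//     paddings = { 0, 0,   1, 1,   2, 0 }
//
// leaves dimension 0 untouched, pads dimension 1 by one element on each side, and pads
// dimension 2 by two elements in front only.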

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType            = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout          = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }
    pooling2dLayer->SetBackendId(setBackend);

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
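
// Worked example (illustrative only): with the implicit kPaddingSame scheme, CalcPadding picks
// the smallest padding that keeps output = ceil(input / stride). For a pooling window of 3,
// stride 2 and input width 7: output = ceil(7 / 2) = 4, total padding = (4 - 1) * 2 + 3 - 7 = 2,
// split as padLeft = 1, padRight = 1. The kPaddingValid scheme always yields zero padding.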

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    startLayer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
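
// Note (illustrative only): BroadcastTensor reconciles mismatched input ranks before the two
// operands reach the addition layer; e.g. adding a [4] tensor to a [2, 3, 4] tensor first
// reshapes the former to [1, 1, 4] so that ArmNN's elementwise broadcasting rules can apply.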

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
                      const HalModel& model,
                      ConversionData& data,
                      armnn::ArgMinMaxFunction argMinMaxFunction)
{
    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. a rank 4 tensor can have axis in range [-4, 4)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis     = axis;

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
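
// Example (illustrative only): for a rank-4 input the accepted axis range is [-4, 4); negative
// values count from the back, so axis -1 names dimension 3 and axis -4 names dimension 0. The
// axis is forwarded to the ArmNN descriptor unchanged, negative values included.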

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape     = outputInfo.GetShape();
    const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // Handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            armnn::BackendId setBackendReshape;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
            newReshape.SetBackendId(setBackendReshape);

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    if (inputShapes.size() != inputHandles.size())
    {
        return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
                    inputShapes.size(), inputHandles.size());
    }

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    armnn::BackendId setBackendConcat;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConcatSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendConcat,
                                   inputTensorInfos,
                                   outputInfo,
                                   concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ConcatLayer", __func__);
    }
    layer->SetBackendId(setBackendConcat);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();

    if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
    {
        return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
                    static_cast<std::size_t>(numInputSlots), inputHandles.size());
    }
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        armnn::BackendId setBackendTranspose;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendTranspose,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        deswizzleLayer.SetBackendId(setBackendTranspose);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        transposeOutputShape();
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            if (!layer->GetOutputSlot(0).IsTensorInfoSet())
            {
                return Fail("%s: TensorInfo is not set", __func__);
            }
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            transposeOutputShape();
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        armnn::BackendId setBackendReshape2;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape2,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        layer->SetBackendId(setBackendReshape2);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                       0,
                                                       *layer,
                                                       model,
                                                       data,
                                                       nullptr,
                                                       validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
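
// Note (illustrative only): ArmNN's concat descriptor only supports concatenation along
// dimensions 0, 1 or 3 of a 4-D tensor (0 or 2 for 3-D), so a request along the remaining
// dimension is handled by sandwiching the concat between a pair of transposes:
// permutationPair.first swizzles every input so the concat axis becomes a supported one, and
// permutationPair.second (applied by transposeOutputShape above) swizzles the result back.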
2424
2425template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002426 typename HalOperation = typename HalPolicy::Operation,
2427 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002428bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2429{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002430 using HalOperand = typename HalPolicy::Operand;
2431 using HalOperandType = typename HalPolicy::OperandType;
2432
2433 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002434 if (!input.IsValid())
2435 {
2436 return Fail("%s: Operation has invalid inputs", __func__);
2437 }
2438
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002439 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002440 if (!output)
2441 {
2442 return Fail("%s: Could not read output 0", __func__);
2443 }
2444
2445 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002446 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002447
Keith Davis8f22bed2022-04-29 10:57:27 +01002448 LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2449 if (!weightsInput.IsValid())
Mike Kellyb5fdf382019-06-11 16:35:25 +01002450 {
2451 return Fail("%s: Operation has invalid inputs", __func__);
2452 }
2453
Keith Davis8f22bed2022-04-29 10:57:27 +01002454 LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2455 if (!biasInput.IsValid())
2456 {
2457 return Fail("%s: Operation has invalid inputs", __func__);
2458 }
2459
2460 biasInput.SanitizeQuantizationScale(weightsInput, input);
2461 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2462 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002463
2464 armnn::Convolution2dDescriptor desc;
2465 desc.m_DataLayout = armnn::DataLayout::NHWC;
2466 ActivationFn activation;
2467
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002468 if (operation.inputs.size() == 10)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002469 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002470 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2471 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2472 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2473 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2474 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2475 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002476 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002477 {
2478 return Fail("%s: Operation has invalid inputs", __func__);
2479 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01002480 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002481 else if (operation.inputs.size() == 7)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002482 {
2483 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002484 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2485 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2486 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002487 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002488 {
2489 return Fail("%s: Operation has invalid inputs", __func__);
2490 }
2491
Keith Davis8f22bed2022-04-29 10:57:27 +01002492 const uint32_t kernelX = weightsInfo.GetShape()[2];
2493 const uint32_t kernelY = weightsInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002494 const uint32_t inputX = inputInfo.GetShape()[2];
2495 const uint32_t inputY = inputInfo.GetShape()[1];
2496
2497 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2498 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002499 }
2500 else
2501 {
2502 return Fail("%s: Unsupported number of operation inputs", __func__);
2503 }
2504
2505 desc.m_BiasEnabled = true;
Keith Davis8f22bed2022-04-29 10:57:27 +01002506 armnn::Optional<armnn::TensorInfo> biases(biasInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002507
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002508 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002509 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002510 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2511 {
2512 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2513 IsConvolution2dSupported,
2514 data.m_Backends,
2515 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002516 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002517 inputInfo,
2518 outputInfo,
2519 desc,
Keith Davis8f22bed2022-04-29 10:57:27 +01002520 weightsInfo,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002521 biases);
2522 };
2523
 2524 if (!IsDynamicTensor(outputInfo))
2525 {
2526 validateFunc(outputInfo, isSupported);
2527 }
2528 else
2529 {
2530 isSupported = AreDynamicTensorsSupported();
2531 }
2532
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002533 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002534 {
2535 return false;
2536 }
2537
 2538 armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
 2539 if (!startLayer)
 2540 {
 2541 return Fail("%s: AddConvolution2dLayer failed", __func__);
 2542 }
 2543 // Only set the backend once the layer is known to be valid
 2544 startLayer->SetBackendId(setBackend);
2545
Mike Kellyb5fdf382019-06-11 16:35:25 +01002546 input.Connect(startLayer->GetInputSlot(0));
2547
Keith Davis8f22bed2022-04-29 10:57:27 +01002548 // Connect weights and bias inputs
2549 weightsInput.Connect(startLayer->GetInputSlot(1));
2550 biasInput.Connect(startLayer->GetInputSlot(2));
2551
Kevin Mayfcf2a152020-09-08 16:06:32 +01002552 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2553 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002554}
2555
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002556template<typename HalPolicy,
2557 typename HalOperation = typename HalPolicy::Operation,
2558 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002559bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2560{
2561 using HalOperand = typename HalPolicy::Operand;
2562 using HalOperandType = typename HalPolicy::OperandType;
2563
2564 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
 2565 if (!input.IsValid())
2566 {
2567 return Fail("%s: Operation has invalid inputs", __func__);
2568 }
2569
2570 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2571 unsigned int rank = inputInfo.GetNumDimensions();
2572 if (rank != 4)
2573 {
2574 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2575 }
2576
2577 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2578 if (!output)
2579 {
2580 return Fail("%s: Could not read output 0", __func__);
2581 }
2582
2583 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002584
2585 armnn::DepthToSpaceDescriptor descriptor;
2586
 2587 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
 2588 {
 2589 return Fail("%s: Could not read block size", __func__);
 2590 }
 2591 if (descriptor.m_BlockSize <= 1)
 2592 {
 2593 return Fail("%s: Block size must be greater than 1", __func__);
 2594 }
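// For reference (per the NNAPI DEPTH_TO_SPACE definition): an NHWC input [N, H, W, C] with
// block size b produces an output of shape [N, H * b, W * b, C / (b * b)]. An illustrative
// example: [1, 2, 2, 8] with b = 2 becomes [1, 4, 4, 2].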
2592
2593 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002594 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002595 {
2596 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2597 }
2598
2599 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002600 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002601 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2602 {
2603 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2604 IsDepthToSpaceSupported,
2605 data.m_Backends,
2606 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002607 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002608 inputInfo,
2609 outputInfo,
2610 descriptor);
2611 };
2612
 2613 if (!IsDynamicTensor(outputInfo))
2614 {
2615 validateFunc(outputInfo, isSupported);
2616 }
2617 else
2618 {
2619 isSupported = AreDynamicTensorsSupported();
2620 }
2621
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002622 if (!isSupported)
2623 {
2624 return false;
2625 }
2626
 2627 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
 2628 if (!layer)
 2629 {
 2630 return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
 2631 }
 2632 layer->SetBackendId(setBackend);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002633 input.Connect(layer->GetInputSlot(0));
2634
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002635 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002636}
2637
2638template<typename HalPolicy,
2639 typename HalOperation = typename HalPolicy::Operation,
2640 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002641bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2642{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002643 using HalOperand = typename HalPolicy::Operand;
2644 using HalOperandType = typename HalPolicy::OperandType;
2645
2646 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002647
2648 if (!input.IsValid())
2649 {
2650 return Fail("%s: Operation has invalid inputs", __func__);
2651 }
2652
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002653 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002654
2655 if (!output)
2656 {
2657 return Fail("%s: Could not read output 0", __func__);
2658 }
2659
2660 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002661 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002662
 2663 // Weights may be constant or non-constant; both cases are wired up below as layer inputs.
 2664 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002665 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Cathal Corbett915f2a72022-04-15 14:12:08 +01002666 if (!weightsOperand)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002667 {
Cathal Corbett915f2a72022-04-15 14:12:08 +01002668 return Fail("%s: Could not read weights", __func__);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002669 }
Colm Donelanccfeb5e2021-03-30 15:30:13 +01002670 // Basic sanity check on the weights shape.
2671 // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
2672 // [1, filter_height, filter_width, depth_out]
2673 if (weightsOperand->dimensions[0] != 1)
2674 {
2675 return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
2676 }
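// Illustrative example of a valid weights shape: for an input depth I = 8 and a depth
// multiplier M = 2, a 3x3 depthwise filter has shape [1, 3, 3, 16], since depth_out = I * M.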
2677
Mike Kellyb5fdf382019-06-11 16:35:25 +01002678 armnn::DepthwiseConvolution2dDescriptor desc;
2679 desc.m_DataLayout = armnn::DataLayout::NHWC;
2680
Cathal Corbett915f2a72022-04-15 14:12:08 +01002681 LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2682 if (!weightsInput.IsValid())
Mike Kellyb5fdf382019-06-11 16:35:25 +01002683 {
2684 return Fail("%s: Operation has invalid inputs", __func__);
2685 }
2686
Cathal Corbett915f2a72022-04-15 14:12:08 +01002687 const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2688 if (!biasOperand)
2689 {
2690 return Fail("%s: Could not read bias", __func__);
2691 }
2692
2693 LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
2694 if (!biasInput.IsValid())
2695 {
2696 return Fail("%s: Operation has invalid inputs", __func__);
2697 }
2698
2699 biasInput.SanitizeQuantizationScale(weightsInput, input);
2700 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
2701 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002702
2703 ActivationFn activation;
2704
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002705 if (operation.inputs.size() == 11)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002706 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002707 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2708 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2709 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2710 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2711 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2712 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002713 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002714 {
2715 return Fail("%s: Operation has invalid inputs", __func__);
2716 }
2717 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002718 else if (operation.inputs.size() == 8)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002719 {
2720 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002721 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2722 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2723 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002724 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002725 {
2726 return Fail("%s: Operation has invalid inputs", __func__);
2727 }
2728
Cathal Corbett915f2a72022-04-15 14:12:08 +01002729 const uint32_t kernelX = weightsInfo.GetShape()[2];
2730 const uint32_t kernelY = weightsInfo.GetShape()[1];
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002731 const uint32_t inputX = inputInfo.GetShape()[2];
2732 const uint32_t inputY = inputInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002733
2734 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2735 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2736 }
2737 else
2738 {
2739 return Fail("%s: Unsupported number of operation inputs", __func__);
2740 }
2741
2742 desc.m_BiasEnabled = true;
Cathal Corbett915f2a72022-04-15 14:12:08 +01002743 armnn::Optional<armnn::TensorInfo> biases(biasInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002744
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002745 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002746 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002747 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2748 {
2749 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2750 IsDepthwiseConvolutionSupported,
2751 data.m_Backends,
2752 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002753 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002754 inputInfo,
2755 outputInfo,
2756 desc,
Cathal Corbett915f2a72022-04-15 14:12:08 +01002757 weightsInfo,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002758 biases);
2759 };
2760
 2761 if (!IsDynamicTensor(outputInfo))
2762 {
2763 validateFunc(outputInfo, isSupported);
2764 }
2765 else
2766 {
2767 isSupported = AreDynamicTensorsSupported();
2768 }
 2769
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002771 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002772 {
2773 return false;
2774 }
2775
 2776 armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
 2777 if (!startLayer)
 2778 {
 2779 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
 2780 }
 2781 startLayer->SetBackendId(setBackend);
2782
Mike Kellyb5fdf382019-06-11 16:35:25 +01002783 input.Connect(startLayer->GetInputSlot(0));
2784
Cathal Corbett915f2a72022-04-15 14:12:08 +01002785 // Connect weights and bias inputs
2786 weightsInput.Connect(startLayer->GetInputSlot(1));
2787 biasInput.Connect(startLayer->GetInputSlot(2));
2788
Kevin Mayfcf2a152020-09-08 16:06:32 +01002789 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2790 data, nullptr, validateFunc, activation);
arovir01b0717b52018-09-05 17:03:25 +01002791}
2792
Mike Kelly3c673942019-07-25 09:26:06 +01002793template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002794 typename HalOperation = typename HalPolicy::Operation,
2795 typename HalModel = typename HalPolicy::Model>
2796bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002797{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002798 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002799
2800 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2801 if (!input.IsValid())
2802 {
2803 return Fail("%s: Operation has invalid input", __func__);
2804 }
2805
Sadik Armagan98c0f662019-11-21 15:54:36 +00002806 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2807 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2808 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2809 {
2810 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2811 }
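// The dequantization itself maps q -> (q - zeroPoint) * scale. An illustrative example:
// scale = 0.05f, zeroPoint = 128, q = 200 gives (200 - 128) * 0.05f = 3.6f.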
2812
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002813 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002814 if (!outputOperand)
2815 {
2816 return Fail("%s: Operation has invalid outputs", __func__);
2817 }
2818
2819 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002820
2821 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002822 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002823 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2824 {
2825 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2826 IsDequantizeSupported,
2827 data.m_Backends,
2828 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002829 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002830 inputInfo,
2831 outputInfo);
2832 };
2833
 2834 if (IsDynamicTensor(outputInfo))
2835 {
2836 isSupported = AreDynamicTensorsSupported();
2837 }
2838 else
2839 {
2840 validateFunc(outputInfo, isSupported);
2841 }
2842
Mike Kelly46272802019-08-14 17:00:48 +01002843 if (!isSupported)
2844 {
2845 return false;
2846 }
2847
 2848 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
 2849 if (!layer)
 2850 {
 2851 return Fail("%s: Could not add the DequantizeLayer", __func__);
 2852 }
 2853 layer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01002854 input.Connect(layer->GetInputSlot(0));
2855
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002856 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002857}
2858
2859template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002860 typename HalOperation = typename HalPolicy::Operation,
2861 typename HalModel = typename HalPolicy::Model>
2862bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002863{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002864 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002865
2866 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2867 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2868
2869 if (!input0.IsValid() || !input1.IsValid())
2870 {
2871 return Fail("%s: Operation has invalid inputs", __func__);
2872 }
2873
 2874 // The FuseActivation parameter is always at input index 2 and is optional
2876 ActivationFn activationFunction;
2877 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2878 {
2879 return Fail("%s: Operation has invalid inputs", __func__);
2880 }
2881
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002882 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002883 if (!output)
2884 {
2885 return Fail("%s: Could not read output 0", __func__);
2886 }
2887
2888 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002889
2890 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002891 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002892 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2893 {
2894 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2895 IsDivisionSupported,
2896 data.m_Backends,
2897 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002898 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002899 input0.GetTensorInfo(),
2900 input1.GetTensorInfo(),
2901 outputInfo);
2902 };
2903
 2904 if (!IsDynamicTensor(outputInfo))
2905 {
2906 validateFunc(outputInfo, isSupported);
2907 }
2908 else
2909 {
2910 isSupported = AreDynamicTensorsSupported();
2911 }
2912
Mike Kelly46272802019-08-14 17:00:48 +01002913 if (!isSupported)
2914 {
2915 return false;
2916 }
2917
 2918 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
 2919 if (!startLayer)
 2920 {
 2921 return Fail("%s: AddDivisionLayer failed", __func__);
 2922 }
 2923 startLayer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01002920
Kevin Mayfcf2a152020-09-08 16:06:32 +01002921 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2922 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01002923 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01002924 return false;
Mike Kelly46272802019-08-14 17:00:48 +01002925 }
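// BroadcastTensor reshapes whichever input has the lower rank so the ranks match before the
// two inputs are connected. An illustrative example: given input0 of shape [1, 2, 2, 2] and
// input1 of shape [2], input1 is reshaped to [1, 1, 1, 2] and then broadcast element-wise.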
Kevin Mayfcf2a152020-09-08 16:06:32 +01002926
2927 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2928 data, nullptr, validateFunc, activationFunction);
2929
Mike Kelly46272802019-08-14 17:00:48 +01002930}
2931
2932template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002933 typename HalOperation = typename HalPolicy::Operation,
2934 typename HalModel = typename HalPolicy::Model>
2935bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002936{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002937 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002938
2939 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2940 if (!input.IsValid())
2941 {
2942 return Fail("%s: Operation has invalid inputs", __func__);
2943 }
2944
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002945 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002946 if (!outputOperand)
2947 {
2948 return Fail("%s: Operation has invalid outputs", __func__);
2949 }
2950
2951 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002952
2953 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01002954 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002955 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2956 {
2957 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2958 IsFloorSupported,
2959 data.m_Backends,
2960 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01002961 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002962 input.GetTensorInfo(),
2963 outputInfo);
2964 };
2965
 2966 if (!IsDynamicTensor(outputInfo))
2967 {
2968 validateFunc(outputInfo, isSupported);
2969 }
2970 else
2971 {
2972 isSupported = AreDynamicTensorsSupported();
2973 }
2974
Mike Kelly46272802019-08-14 17:00:48 +01002975 if (!isSupported)
2976 {
2977 return false;
2978 }
2979
 2980 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
 2981 if (!layer)
 2982 {
 2983 return Fail("%s: Could not add the FloorLayer", __func__);
 2984 }
 2985 layer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01002986 input.Connect(layer->GetInputSlot(0));
2987
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002988 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002989}
2990
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002991inline bool IsQSymm8(const V1_0::Operand&)
2992{
2993 return false;
2994}
2995
Kevin May42477c12020-03-26 13:34:14 +00002996#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002997
2998inline bool IsQSymm8(const V1_2::Operand& operand)
2999{
3000 return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
3001}
3002
3003#endif
3004
Kevin May42477c12020-03-26 13:34:14 +00003005#ifdef ARMNN_ANDROID_NN_V1_3
3006
3007inline bool IsQSymm8(const V1_3::Operand& operand)
3008{
3009 return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
3010}
3011
3012#endif
3013
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003014enum class DequantizeStatus
3015{
3016 SUCCESS,
3017 NOT_REQUIRED,
3018 INVALID_OPERAND
3019};
3020
3021using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
3022
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003023template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003024 typename HalOperation = typename HalPolicy::Operation,
3025 typename HalModel = typename HalPolicy::Model>
3026DequantizeResult DequantizeIfRequired(size_t operand_index,
3027 const HalOperation& operation,
3028 const HalModel& model,
3029 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003030{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003031 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003032
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003033 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00003034 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003035 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003036 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00003037 }
3038
3039 if (IsOperandConstant<HalPolicy>(*weightsOperand))
3040 {
3041 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003042 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003043 }
3044
3045 const size_t weightsInputIndex = operation.inputs[operand_index];
3046
 3047 // The weights are a non-const tensor; this indicates they might be the output of a DEQUANTIZE op.
 3048 // Iterate over the operations and find the preceding one, which should be DEQUANTIZE.
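// The pattern being matched is, as a sketch:
//     constant QSYMM8 tensor -> DEQUANTIZE -> (weights input of this operation)
// in which case the DEQUANTIZE can be folded away by dequantizing the constant on the host.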
Kevin May42477c12020-03-26 13:34:14 +00003049 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003050 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003051 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00003052 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003053 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
3054 {
3055 continue;
3056 }
3057
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003058 size_t outOpIndex = weightsInputIndex + 1;
3059 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003060 {
3061 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003062 }
3063
3064 if (outOpIndex != weightsInputIndex)
3065 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003066 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003067 }
3068
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003069 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Mike Kellye2d611e2021-10-14 12:35:58 +01003070
3071 if (!operand)
3072 {
3073 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
3074 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003075
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003076 if (!IsQSymm8(*operand))
3077 {
3078 // Only supporting dequantize from QSYMM8 to FLOAT
3079 break;
3080 }
3081
3082 // Allocate a new buffer for the dequantized data and manually dequantize
3083 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
3084 if (!startValue)
3085 {
3086 // Failed to get the operand address
3087 break;
3088 }
3089
3090 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
3091 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003092 const float quantizationScale = operand->scale;
3093
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003094 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
 3095 float* dstPtr = dequantizedBuffer.get();
 3096 if (!dstPtr)
 3097 {
 3098 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
 3099 }
 3100 for (size_t i = 0; i < dequantizedBufferLength; ++i)
 3101 {
 3102 // Index with i: writing through an unadvanced *dstPtr would overwrite element 0 on every pass
 3103 dstPtr[i] = quantizedBuffer[i] * quantizationScale;
 3104 }
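// Illustrative example: with quantizationScale = 0.02f, a stored value of 100 dequantizes to
// 100 * 0.02f = 2.0f. QSYMM8 is symmetric, so no zero-point offset is applied here.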
3105
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003106 // Construct tensor info for dequantized ConstTensor
3107 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
3108 operand->dimensions.data(),
3109 armnn::DataType::Float32);
3110
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003111 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
3112 std::move(tensorInfo),
3113 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003114 }
3115
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003116 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003117}
3118
3119template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003120 typename HalOperation = typename HalPolicy::Operation,
3121 typename HalModel = typename HalPolicy::Model>
3122ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
3123 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003124 const ConversionData& data,
3125 size_t operandIndex,
3126 bool optional = false)
3127{
 3128 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);
3129
3130 DequantizeStatus status = std::get<3>(dequantized);
3131 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003132 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003133 case DequantizeStatus::INVALID_OPERAND:
3134 {
3135 // return invalid const tensor pin
3136 return ConstTensorPin();
3137 }
3138 case DequantizeStatus::NOT_REQUIRED:
3139 {
3140 return ConvertOperationInputToConstTensorPin<HalPolicy>(
3141 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3142 }
3143 case DequantizeStatus::SUCCESS:
3144 default:
3145 {
3146 return ConstTensorPin(
3147 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3148 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003149 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003150}
3151
3152
Mike Kelly46272802019-08-14 17:00:48 +01003153template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003154 typename HalOperation = typename HalPolicy::Operation,
3155 typename HalModel = typename HalPolicy::Model>
3156bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003157{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003158 using HalOperand = typename HalPolicy::Operand;
3159
Mike Kelly46272802019-08-14 17:00:48 +01003160 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3161 if (!input.IsValid())
3162 {
3163 return Fail("%s: Operation has invalid inputs", __func__);
3164 }
3165
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003166 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003167 if (!output)
3168 {
3169 return Fail("%s: Could not read output 0", __func__);
3170 }
3171
3172 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3173 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3174
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003175 LayerInputHandle weightsInput = LayerInputHandle();
3176 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3177 if (!weightsOperand)
Mike Kelly46272802019-08-14 17:00:48 +01003178 {
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003179 return Fail("%s: Could not read weights", __func__);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003180 }
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003181
 3182 // If the weights are constant, a separate constant layer will be created to store the data.
 3183 // Otherwise the non-const weights are handled as inputs.
3184 weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3185 if (!weightsInput.IsValid())
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003186 {
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003187 return Fail("%s: Operation has invalid inputs", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003188 }
3189
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003190 LayerInputHandle biasInput = LayerInputHandle();
3191 const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3192 if (!biasOperand)
3193 {
3194 return Fail("%s: Could not read bias", __func__);
3195 }
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003196
 3197 // If the bias is constant, a separate constant layer will be created to store the data.
 3198 // Otherwise the non-const bias is handled as an input.
3199 biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
3200 if (!biasInput.IsValid())
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003201 {
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003202 return Fail("%s: Operation has invalid inputs", __func__);
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003203 }
3204
Matthew Sloyan56c249c2021-08-09 12:49:23 +01003205 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
Mike Kelly46272802019-08-14 17:00:48 +01003206 armnn::TensorInfo reshapedInfo = inputInfo;
Mike Kelly46272802019-08-14 17:00:48 +01003207 try
3208 {
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003209 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003210 }
3211 catch (const std::exception& e)
3212 {
Mike Kelly46272802019-08-14 17:00:48 +01003213 return Fail("%s: %s", __func__, e.what());
3214 }
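// FlattenFullyConnectedInput collapses the input to 2D [batch_size, input_size], where
// input_size is taken from the weights shape [num_units, input_size]. An illustrative
// example: an input of shape [1, 4, 1, 5] (20 elements) with weights of shape [3, 20]
// is flattened to [1, 20].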
3215
 3216 // Ensure the bias quantization scale is within 1% of inputScale * weightsScale (small float differences can exist)
Matthew Sloyan56c249c2021-08-09 12:49:23 +01003217 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003218 SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
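// For quantized tensors the expected relation is biasScale == inputScale * weightsScale.
// An illustrative example: inputScale = 0.5f and weightsScale = 0.25f imply a bias scale
// of 0.125f; SanitizeBiasQuantizationScale snaps a bias scale within tolerance onto it.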
Mike Kelly46272802019-08-14 17:00:48 +01003219
3220 ActivationFn activationFunction;
3221 if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
3222 {
3223 return Fail("%s: Operation has invalid inputs", __func__);
3224 }
3225
3226 armnn::FullyConnectedDescriptor desc;
3227 desc.m_TransposeWeightMatrix = true;
3228 desc.m_BiasEnabled = true;
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003229 desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003230
3231 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01003232 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003233 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3234 {
Finn Williams49184462020-10-02 13:28:34 +01003235 if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003236 weightsInfo.GetShape(),
Finn Williams49184462020-10-02 13:28:34 +01003237 outputInfo.GetShape(),
3238 desc.m_TransposeWeightMatrix))
3239 {
3240 isSupported = false;
3241 Fail("%s: Expected outputShape does not match actual outputShape", __func__);
3242 return;
3243 }
3244
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003245 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003246 IsFullyConnectedSupported,
3247 data.m_Backends,
3248 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01003249 setBackend,
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003250 reshapedInfo,
3251 outputInfo,
3252 weightsInfo,
3253 biasInfo,
3254 desc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003255 };
3256
 3257 if (!IsDynamicTensor(outputInfo))
3258 {
3259 validateFunc(outputInfo, isSupported);
3260 }
3261 else
3262 {
3263 isSupported = AreDynamicTensorsSupported();
3264 }
3265
Mike Kelly46272802019-08-14 17:00:48 +01003266 if (!isSupported)
3267 {
3268 return false;
3269 }
3270
 3271 // Add FullyConnected layer. Weights and bias will be connected as constant layers or as non-const inputs.
 3272 armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
 3273 if (!startLayer)
 3274 {
 3275 return Fail("%s: AddFullyConnectedLayer failed", __func__);
 3276 }
 3277 startLayer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01003274
Kevin Mayfcf2a152020-09-08 16:06:32 +01003275 if (inputInfo.GetNumDimensions() > 2U)
Mike Kelly46272802019-08-14 17:00:48 +01003276 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003277 armnn::ReshapeDescriptor reshapeDescriptor;
3278 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
Mike Kelly46272802019-08-14 17:00:48 +01003279
Kevin Mayfcf2a152020-09-08 16:06:32 +01003280 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003281 if (!reshapeLayer)
3282 {
3283 return Fail("%s: could not add the reshapeLayer", __func__);
3284 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003285 input.Connect(reshapeLayer->GetInputSlot(0));
3286 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
3287 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
Mike Kelly46272802019-08-14 17:00:48 +01003288 }
3289 else
3290 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003291 input.Connect(startLayer->GetInputSlot(0));
Mike Kelly46272802019-08-14 17:00:48 +01003292 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003293
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003294 // Connect weights and bias inputs
3295 weightsInput.Connect(startLayer->GetInputSlot(1));
3296 biasInput.Connect(startLayer->GetInputSlot(2));
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003297
Kevin Mayfcf2a152020-09-08 16:06:32 +01003298 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3299 data, nullptr, validateFunc, activationFunction);
Mike Kelly46272802019-08-14 17:00:48 +01003300}
3301
3302template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003303 typename HalOperation = typename HalPolicy::Operation,
3304 typename HalModel = typename HalPolicy::Model>
3305bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003306{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003307 using HalOperand = typename HalPolicy::Operand;
3308
Mike Kelly999e2092019-08-15 10:46:46 +01003309 if (operation.inputs.size() != 1)
3310 {
3311 return Fail("%s: Optional inputs are not supported", __func__);
3312 }
3313
Mike Kelly46272802019-08-14 17:00:48 +01003314 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3315 if (!input.IsValid())
3316 {
3317 return Fail("%s: Operation has invalid inputs", __func__);
3318 }
3319
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003320 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003321 if (!output)
3322 {
3323 return Fail("%s: Could not read output 0", __func__);
3324 }
3325
3326 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3327 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3328
Mike Kelly46272802019-08-14 17:00:48 +01003329 if (outputInfo.GetNumDimensions() != 4u)
3330 {
3331 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3332 }
3333
3334 armnn::L2NormalizationDescriptor desc;
3335 desc.m_DataLayout = armnn::DataLayout::NHWC;
3336
3337 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01003338 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003339 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3340 {
3341 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3342 IsL2NormalizationSupported,
3343 data.m_Backends,
3344 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01003345 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003346 inputInfo,
3347 outputInfo,
3348 desc);
3349 };
3350
 3351 if (!IsDynamicTensor(outputInfo))
3352 {
3353 validateFunc(outputInfo, isSupported);
3354 }
3355 else
3356 {
3357 isSupported = AreDynamicTensorsSupported();
3358 }
3359
Mike Kelly46272802019-08-14 17:00:48 +01003360 if (!isSupported)
3361 {
3362 return false;
3363 }
3364
 3365 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
 3366 if (!layer)
 3367 {
 3368 return Fail("%s: Could not add the L2NormalizationLayer", __func__);
 3369 }
 3370 layer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01003371 input.Connect(layer->GetInputSlot(0));
3372
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003373 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003374}
3375
3376template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003377 typename HalOperation = typename HalPolicy::Operation,
3378 typename HalModel = typename HalPolicy::Model>
3379bool ConvertLocalResponseNormalization(const HalOperation& operation,
3380 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003381 ConversionData& data)
3382{
Mike Kelly999e2092019-08-15 10:46:46 +01003383 if (operation.inputs.size() != 5)
3384 {
3385 return Fail("%s: Optional inputs are not supported", __func__);
3386 }
3387
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003388 using HalOperand = typename HalPolicy::Operand;
3389 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003390
3391 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3392 if (!input.IsValid())
3393 {
3394 return Fail("%s: Operation has invalid inputs", __func__);
3395 }
3396
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003397 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003398 if (!output)
3399 {
3400 return Fail("%s: Could not read output 0", __func__);
3401 }
3402
3403 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3404 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3405
Mike Kelly46272802019-08-14 17:00:48 +01003406 if (outputInfo.GetNumDimensions() != 4u)
3407 {
3408 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3409 }
3410
3411 armnn::NormalizationDescriptor descriptor;
3412 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3413 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3414 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3415
3416 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003417 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003418 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3419 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3420 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3421 {
3422 return Fail("%s: Operation has invalid inputs", __func__);
3423 }
3424
3425 // ArmNN expects normSize to be the full size of the normalization
3426 // window rather than the radius as in AndroidNN.
3427 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
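// Illustrative example: an AndroidNN radius of 2 becomes a window of 1 + (2 * 2) = 5,
// i.e. the element itself plus two neighbours on each side.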
3428
3429 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01003430 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003431 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3432 {
3433 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3434 IsNormalizationSupported,
3435 data.m_Backends,
3436 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01003437 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003438 inputInfo,
3439 outputInfo,
3440 descriptor);
3441 };
3442
 3443 if (!IsDynamicTensor(outputInfo))
3444 {
3445 validateFunc(outputInfo, isSupported);
3446 }
3447 else
3448 {
3449 isSupported = AreDynamicTensorsSupported();
3450 }
3451
Mike Kelly46272802019-08-14 17:00:48 +01003452 if (!isSupported)
3453 {
3454 return false;
3455 }
3456
 3457 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
 3458 if (!layer)
 3459 {
 3460 return Fail("%s: Could not add the NormalizationLayer", __func__);
 3461 }
 3462 layer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01003463 input.Connect(layer->GetInputSlot(0));
3464
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003465 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003466}
3467
3468template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003469 typename HalOperation = typename HalPolicy::Operation,
3470 typename HalModel = typename HalPolicy::Model>
3471bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003472{
Mike Kelly46272802019-08-14 17:00:48 +01003473 armnn::ActivationDescriptor desc;
3474 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3475
3476 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3477}
3478
3479template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003480 typename HalOperation = typename HalPolicy::Operation,
3481 typename HalModel = typename HalPolicy::Model>
3482bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003483{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003484 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003485
3486 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3487 if (!input.IsValid())
3488 {
3489 return Fail("%s: Operation has invalid inputs", __func__);
3490 }
3491
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003492 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003493 if (!output)
3494 {
3495 return Fail("%s: Could not read output 0", __func__);
3496 }
3497
3498 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003499
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003500 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003501 if (!axisOperand)
3502 {
3503 return Fail("%s: Could not read input 1", __func__);
3504 }
3505
3506 std::vector<int32_t> axis;
3507 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3508 {
3509 return Fail("%s: Input 1 has invalid values", __func__);
3510 }
3511
3512 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3513
3514 // Convert the axis to unsigned int and remove duplicates.
3515 unsigned int rank = inputInfo.GetNumDimensions();
3516 std::set<unsigned int> uniqueAxis;
3517 std::transform(axis.begin(), axis.end(),
3518 std::inserter(uniqueAxis, uniqueAxis.begin()),
3519 [rank](int i) -> unsigned int { return (i + rank) % rank; });
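// Illustrative example: for rank = 4, an axis list of { -1, 3, 1 } normalises to the set
// { 1, 3 }; negative indices wrap around and the std::set collapses duplicates.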
3520
3521 // Get the "keep dims" flag.
3522 int32_t keepDims = 0;
3523 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3524 {
3525 return Fail("%s: Could not read input 2", __func__);
3526 }
3527
3528 armnn::MeanDescriptor descriptor;
3529 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3530 descriptor.m_KeepDims = keepDims > 0;
3531
3532 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01003533 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003534 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3535 {
3536 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3537 IsMeanSupported,
3538 data.m_Backends,
3539 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01003540 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003541 inputInfo,
3542 outputInfo,
3543 descriptor);
3544 };
3545
 3546 if (!IsDynamicTensor(outputInfo))
3547 {
3548 validateFunc(outputInfo, isSupported);
3549 }
3550 else
3551 {
3552 isSupported = AreDynamicTensorsSupported();
3553 }
3554
Mike Kelly46272802019-08-14 17:00:48 +01003555 if (!isSupported)
3556 {
3557 return false;
3558 }
3559
 3560 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
 3561 if (!layer)
 3562 {
 3563 return Fail("%s: Could not add the MeanLayer", __func__);
 3564 }
 3565 layer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01003566 input.Connect(layer->GetInputSlot(0));
3567
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003568 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003569}
3570
3571template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003572 typename HalOperation = typename HalPolicy::Operation,
3573 typename HalModel = typename HalPolicy::Model>
3574bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003575{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003576 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003577
3578 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3579 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3580
3581 if (!input0.IsValid() || !input1.IsValid())
3582 {
3583 return Fail("%s: Operation has invalid inputs", __func__);
3584 }
3585
 3586 // The FuseActivation parameter is always at input index 2 and is optional
3588 ActivationFn activationFunction;
3589 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3590 {
3591 return Fail("%s: Operation has invalid inputs", __func__);
3592 }
3593
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003594 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003595
3596 if (outputOperand == nullptr)
3597 {
3598 return false;
3599 }
3600
3601 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003602
3603 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01003604 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003605 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3606 {
3607 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3608 IsMultiplicationSupported,
3609 data.m_Backends,
3610 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01003611 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003612 input0.GetTensorInfo(),
3613 input1.GetTensorInfo(),
3614 outputInfo);
3615 };
3616
 3617 if (!IsDynamicTensor(outputInfo))
3618 {
3619 validateFunc(outputInfo, isSupported);
3620 }
3621 else
3622 {
3623 isSupported = AreDynamicTensorsSupported();
3624 }
3625
Mike Kelly46272802019-08-14 17:00:48 +01003626 if (!isSupported)
3627 {
3628 return false;
3629 }
3630
 3631 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
 3632 if (!startLayer)
 3633 {
 3634 return Fail("%s: AddMultiplicationLayer failed", __func__);
 3635 }
 3636 startLayer->SetBackendId(setBackend);
Mike Kelly46272802019-08-14 17:00:48 +01003633
Kevin Mayfcf2a152020-09-08 16:06:32 +01003634 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3635 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01003636 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003637 return false;
3638 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01003639
Kevin Mayfcf2a152020-09-08 16:06:32 +01003640 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3641 data, nullptr, validateFunc, activationFunction);
Mike Kelly46272802019-08-14 17:00:48 +01003642}
3643
3644template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003645 typename HalOperation = typename HalPolicy::Operation,
3646 typename HalModel = typename HalPolicy::Model>
3647bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003648{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003649 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003650
Mike Kelly3c673942019-07-25 09:26:06 +01003651 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3652 if (!input.IsValid())
3653 {
3654 return Fail("%s: Operation has invalid inputs", __func__);
3655 }
3656
3657 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3658 unsigned int rank = inputInfo.GetNumDimensions();
3659
3660 armnn::PadDescriptor descriptor;
3661 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3662 {
3663 return Fail("%s: Could not convert paddings", __func__);
3664 }
3665
 3666 // For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensors,
 3667 // the scale and zeroPoint must be the same as input0.
 3668 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q
 3669 // the pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends
 3670 // up as (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003671 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003672 {
3673 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3674 }
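// Illustrative example: for a QAsymmU8 input with zeroPoint = 128 and scale = 0.1f, the
// pad value 128 dequantizes to (128 - 128) * 0.1f = 0.0f, the required logical zero.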
3675
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003676 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003677 if (!output)
3678 {
3679 return Fail("%s: Could not read output", __func__);
3680 }
3681
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003682 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003683
3684 bool isSupported = false;
Cathal Corbett8de96f72022-09-01 13:34:59 +01003685 armnn::BackendId setBackend;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003686 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3687 {
3688 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3689 IsPadSupported,
3690 data.m_Backends,
3691 isSupported,
Cathal Corbett8de96f72022-09-01 13:34:59 +01003692 setBackend,
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003693 inputInfo,
3694 outputInfo,
3695 descriptor);
3696 };
3697
 3698 if (!IsDynamicTensor(outputInfo))
3699 {
3700 validateFunc(outputInfo, isSupported);
3701 }
3702 else
3703 {
3704 isSupported = AreDynamicTensorsSupported();
3705 }
3706
Mike Kelly3c673942019-07-25 09:26:06 +01003707 if (!isSupported)
3708 {
3709 return false;
3710 }
3711
 3712 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
 3713 if (!layer)
 3714 {
 3715 return Fail("%s: Could not add the PadLayer", __func__);
 3716 }
 3717 layer->SetBackendId(setBackend);
Mike Kelly3c673942019-07-25 09:26:06 +01003718 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003719
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003720 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003721}
3722
Mike Kelly0a879362019-07-29 16:56:31 +01003723template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003724 typename HalOperation = typename HalPolicy::Operation,
3725 typename HalModel = typename HalPolicy::Model>
3726bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003727{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003728 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003729
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003730 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3731 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3732 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003733
3734 if (inputOperand == nullptr
3735 || requestedShapeOperand == nullptr
3736 || outputOperand == nullptr)
3737 {
3738 return Fail("%s: Operation has invalid inputs", __func__);
3739 }
3740
3741 if (requestedShapeOperand->dimensions.size() != 1)
3742 {
 3743 return Fail("%s: Input 1 expected to be one-dimensional (found %zu dimensions)",
 3744 __func__, requestedShapeOperand->dimensions.size());
3745 }
3746
3747 std::vector<int32_t> targetDimensions;
3748 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3749 {
3750 return Fail("%s: Could not read values of input 1", __func__);
3751 }
3752
3753 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3754
3755 Shape requestedShape;
3756 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3757 // function that resolves these values into a fully specified tensor shape.
3758 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3759 {
3760 return Fail("%s: Failed to resolve the requested shape", __func__);
3761 }
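    // As an illustration (shapes assumed): for an input of shape [2, 3, 4], targetDimensions of
    // { 4, -1 } would be resolved by reshapePrepare() to the fully specified shape [4, 6], the -1
    // entry being inferred from the remaining element count (24 / 4 = 6).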

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the ReshapeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSubtractionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
    if (!startLayer)
    {
        return Fail("%s: Could not add the SubtractionLayer", __func__);
    }
    startLayer->SetBackendId(setBackend);

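    // BroadcastTensor() handles NNAPI-style implicit broadcasting: when the two inputs have
    // different ranks it may insert a Reshape layer to expand the lower-rank input before it is
    // connected, which is why a failed reshape-support check aborts the conversion. As an
    // illustration (shapes assumed), subtracting a [3] tensor from a [2, 3] tensor reshapes the
    // former to [1, 3] so the backend can broadcast it.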
    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        for (unsigned int i = 0; i < rank; ++i)
        {
            axis.push_back(static_cast<int32_t>(i));
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }
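    // The SQUEEZE semantics implemented above, as an illustration (shapes assumed): an input of
    // shape [1, 2, 1, 3] with no axis operand squeezes every size-1 dimension and yields [2, 3],
    // while an explicit axis of { 0 } removes only the first dimension and yields [2, 1, 3].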

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               inputInfo,
                               outputInfo,
                               reshapeDesc);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the ReshapeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The lengths of the beginOperand, endOperand and stridesOperand must match the rank of the input
    auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be a non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
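    // Mask semantics, as an illustration (values assumed): a set bit i in m_BeginMask means
    // begin[i] is ignored and the fullest possible range is used in that dimension (index 0 for
    // a positive stride); m_EndMask does the same for end[i]; a set bit in m_ShrinkAxisMask
    // selects the single element at begin[i] and removes dimension i from the output shape.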

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check if the slice can fit in an inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then fail, as the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }
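    // As an illustration (values assumed): with begin = { 1, 0 }, end = { 2, 4 } and bit 0 of
    // m_ShrinkAxisMask set on a [3, 4] input, dimension 0 is sliced to the single index 1 and
    // dropped, leaving a [4] output; begin = { 0, 0 } with end = { 3, 4 } on the same axis would
    // fail the width check above because the shrunk axis would cover more than one element.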

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the StridedSliceLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }
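    // When no permutation operand is given, the loop above builds the reverse permutation, which
    // is the NNAPI default for TRANSPOSE. As an illustration (rank assumed): for a rank-4 input,
    // perm becomes { 3, 2, 1, 0 }.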

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the TransposeLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }
    // Setting crops to {0, 0}, {0, 0} as crops are not supported by the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
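    // BATCH_TO_SPACE_ND moves data from the batch dimension into spatial blocks. As an
    // illustration (shapes assumed): an NHWC input of shape [4, 1, 1, 1] with block shape
    // { 2, 2 } produces an output of shape [1, 2, 2, 1], since the batch of 4 is redistributed
    // over a 2x2 spatial grid.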

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
                                 static_cast<unsigned int>(paddingAfterInput));
    }
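    // As an illustration (values assumed): a paddings operand holding { 0, 0, 1, 1 } for the two
    // spatial dimensions yields paddingList = { {0, 0}, {1, 1} }, i.e. no height padding and one
    // element of padding before and after along the width.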

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the SpaceToBatchNdLayer", __func__);
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver