//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
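
// Illustrative usage (the index value is hypothetical): Fail() logs via ALOGD and yields false, so a
// converter can log and abort in a single statement:
//     return Fail("%s: invalid input index: %i", __func__, inputIndex);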

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
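
// Illustrative usage (a sketch mirroring the IsReshapeSupported call in BroadcastTensor below): the
// macro writes the result into the 'supported' argument and logs the backend's reasonIfUnsupported
// string on failure:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);
//     if (!isSupported)
//     {
//         return false;
//     }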

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
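
// Worked example (hypothetical shapes): broadcasting input1 of shape { 3, 4 } against input0 of
// shape { 1, 2, 3, 4 }. sizeDifference is 2, so input1 is reshaped to { 1, 1, 3, 4 } before both
// tensors are connected to startLayer (e.g. an addition layer created by the caller).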

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
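
// Worked example (illustrative numbers, assuming the NNAPI SAME padding scheme): for input = 224,
// kernel = 3, stride = 2 the output size is ceil(224 / 2) = 112, so the total padding required is
// (112 - 1) * 2 + 3 - 224 = 1, which calculateExplicitPadding splits into padHead = 0, padTail = 1.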

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
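
// Worked example (illustrative values): with an input scale of 0.5f and a per-tensor weight scale of
// 0.25f, the expected bias scale is 0.5f * 0.25f = 0.125f. A bias declared with scale 0.1251f falls
// within the 1% tolerance checked above and is snapped to 0.125f; a bias with scale 0.2f is left
// unchanged and will be rejected later by ArmNN's own validation.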

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}


bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
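
// Worked example (hypothetical caller code): concatenating 4-D tensors along dimension 2. The call
// below rewrites concatDim to 1 and returns SwapDim1And2 for both halves of the pair, so the caller
// transposes the inputs with permutationPair.first, concatenates along dimension 1, and restores the
// original layout with permutationPair.second:
//
//     int32_t concatDim = 2;
//     std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
//         std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
//     bool needPermute = CreateConcatPermutationParameters(4, concatDim, permutationPair);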

} // anonymous namespace

namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires it.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
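
// Illustrative usage (hypothetical caller code; numUnits and inputSize are assumed to come from the
// operand's dimensions): pinning constant weights while forcing a 2-D shape, as the fully connected
// conversion does elsewhere in the driver:
//
//     const armnn::TensorShape flatShape({ numUnits, inputSize });
//     ConstTensorPin weightsPin =
//         ConvertOperandToConstTensorPin<HalPolicy>(*weightsOperand, model, data,
//                                                   g_DontPermute, &flatShape);
//     if (!weightsPin.IsValid())
//     {
//         // fail the conversion
//     }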

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
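
// Illustrative usage (the input index is hypothetical): reading a convolution stride argument as a
// scalar INT32 operand:
//
//     int32_t strideX = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 3, HalPolicy::OperandType::INT32, strideX, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }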

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}


template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return true;
    }
    else
    {
        return false;
    }
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__, operand.type);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
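
// Illustrative usage (a sketch of the pattern used by the ConvertXxx functions in the HAL policies;
// the input index is hypothetical):
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();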
1270
Kevin May42477c12020-03-26 13:34:14 +00001271
#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const ::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensor's shape has been inferred by one of the previous layers.
            // If not, we cannot support it. Note that operandIndex must be strictly less than the vector
            // size before it can be used to index into it.
            if (operandIndex < data.m_OutputSlotForOperand.size() && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif

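// Terminology note (an informal summary of the code above): a "type 1" dynamic input is
// one whose shape has already been inferred by a previously converted layer, so its
// TensorInfo can be recovered from m_OutputSlotForOperand; a "type 2" dynamic input has
// no producer with a known shape and is rejected outright.
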
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    // Guard the slot index that is actually used to index the layer's output slots below
    if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type 1 dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }

        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}

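// Sketch of the fused-activation path (illustrative only, e.g. for an ADD carrying a
// RELU fuse code): ProcessActivation appends an Activation layer after 'layer', and the
// operand map is pointed at the activation's output slot instead:
//
//     armnn::IConnectableLayer* end = ProcessActivation(outInfo, ActivationFn::kActivationRelu, &layer, data);
//     data.m_OutputSlotForOperand[operandIndex] = &end->GetOutputSlot(0);
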
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

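// Contract in brief (informal summary of the code above): a boolean operand set to true
// selects armnn::DataLayout::NCHW; false, or a missing/non-boolean/unreadable operand,
// falls back to the NHWC default.
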
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    // Convenience overload: the operation output index doubles as the layer output slot index
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

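// Pattern note (applies to most converters below): support validation is wrapped in a
// lambda so that, when the output tensor is dynamic, the check can be deferred and
// re-run by SetupAndTrackLayerOutputSlot once the output shape has been inferred.
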
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

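// For reference, a sigmoid conversion would follow the same pattern (a sketch; not a
// converter defined in this file):
//
//     armnn::ActivationDescriptor desc;
//     desc.m_Function = armnn::ActivationFunction::Sigmoid;
//     return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
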
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // Add a (before, after) padding pair for each dimension of the input tensor
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

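// Example (a sketch): for a rank-2 input with a paddings tensor of [[1, 2], [0, 1]],
// the loop above yields padDescriptor.m_PadList == { {1, 2}, {0, 1} }, i.e. one
// (before, after) pair per input dimension.
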
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

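// Worked example for the implicit-padding branch (illustrative): with inputWidth == 224,
// m_PoolWidth == 2 and m_StrideX == 2 under SAME padding, the output width is
// ceil(224 / 2) == 112 and the total horizontal pad is max((112 - 1) * 2 + 2 - 224, 0) == 0,
// split between m_PadLeft and m_PadRight by CalcPadding.
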
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2 and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

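// BroadcastTensor, in brief (an informal summary, not a specification): when the two
// inputs differ in rank, e.g. shapes [1, 2, 3, 4] and [4], the lower-rank input is
// expanded with leading 1s (here to [1, 1, 1, 4]) via a Reshape layer so that ArmNN's
// elementwise addition can broadcast it against the higher-rank input.
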
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
                      const HalModel& model,
                      ConversionData& data,
                      armnn::ArgMinMaxFunction argMinMaxFunction)
{
    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (axis < -rank || axis >= rank)
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. a rank 4 tensor can have axis in range [-4, 4)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis     = axis;

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);

    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

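// Example (a sketch): for a rank-4 input, axis == -1 selects the last dimension
// (equivalent to axis == 3); any axis outside the half-open range [-4, 4) is rejected
// by the check above.
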
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape     = outputInfo.GetShape();
    const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check whether a permutation is required and get the pair of permutations needed for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors; dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported  = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
                                   outputInfo, concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    ARMNN_ASSERT(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape; returns false if the required transpose is not supported
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        if (!transposeOutputShape())
        {
            return false;
        }
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            if (!transposeOutputShape())
            {
                return false;
            }
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                       0,
                                                       *layer,
                                                       model,
                                                       data,
                                                       nullptr,
                                                       validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

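// Worked example (illustrative): concatenating two [2, 3] tensors along concatDim == -1
// first maps the axis to 1 (-1 + rank 2), reshapes each input to [1, 2, 3]
// (tensorDimensionsAdded == 1) which shifts the axis to 2, concatenates into [1, 2, 6],
// and finally reshapes the result back to the expected rank-2 shape [2, 6].
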
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are laid out as [ depth_out, filter_height, filter_width, depth_in ]
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

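// Input-signature note (informal summary of the branches above): the 10-input form
// carries explicit pad left/right/top/bottom plus strides, while the 7-input form
// carries an android::nn::PaddingScheme that CalcPadding expands from the NHWC
// input size and the kernel dimensions.
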
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::DepthToSpaceDescriptor descriptor;

    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
    {
        return Fail("%s: Failed to read block size operand", __func__);
    }
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

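// Example (a sketch): with m_BlockSize == 2, an NHWC input of shape [1, 2, 2, 4]
// becomes [1, 4, 4, 1]; each group of 4 channels is redistributed into a 2x2
// spatial block.
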
2524template<typename HalPolicy,
2525 typename HalOperation = typename HalPolicy::Operation,
2526 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002527bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2528{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002529 using HalOperand = typename HalPolicy::Operand;
2530 using HalOperandType = typename HalPolicy::OperandType;
2531
2532 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002533
2534 if (!input.IsValid())
2535 {
2536 return Fail("%s: Operation has invalid inputs", __func__);
2537 }
2538
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002539 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002540
2541 if (!output)
2542 {
2543 return Fail("%s: Could not read output 0", __func__);
2544 }
2545
2546 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002547 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002548
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002549 // ArmNN does not currently support non-fixed weights or bias
Mike Kellyb5fdf382019-06-11 16:35:25 +01002550 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002551 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002552
2553 if (weightsOperand == nullptr)
2554 {
2555 return Fail("%s: Operand is invalid", __func__);
2556 }
Colm Donelanccfeb5e2021-03-30 15:30:13 +01002557 // Basic sanity check on the weights shape.
2558 // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
2559 // [1, filter_height, filter_width, depth_out]
2560 if (weightsOperand->dimensions[0] != 1)
2561 {
2562 return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
2563 }
2564
Mike Kellyb5fdf382019-06-11 16:35:25 +01002565 armnn::DepthwiseConvolution2dDescriptor desc;
2566 desc.m_DataLayout = armnn::DataLayout::NHWC;
2567
Mike Kellyb5fdf382019-06-11 16:35:25 +01002568 // Reinterpret weight data as [ H, W, I, M ]
2569 armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
2570 weightsOperand->dimensions[2],
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002571 inputInfo.GetShape()[3],
2572 weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
Mike Kellyb5fdf382019-06-11 16:35:25 +01002573
2574 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
2575 const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
2576
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

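    // Sketch of the implicit-padding arithmetic (SAME scheme), for intuition
    // only; the driver defers to its CalcPadding helper rather than this math:
    //
    //     // inputX = 224, kernelX = 3, strideX = 2
    //     // outputX   = ceil(224 / 2)                      = 112
    //     // totalPadX = max((112 - 1) * 2 + 3 - 224, 0)    = 1
    //     // padLeft   = totalPadX / 2                      = 0
    //     // padRight  = totalPadX - padLeft                = 1
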
    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

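    // Illustration of what BroadcastTensor arranges (a sketch of the behaviour,
    // not the helper's actual code): when the operands differ in rank, the
    // lower-rank input is reshaped so both have equal rank before the binary
    // layer, e.g.
    //
    //     // input0: [ 4, 5 ], input1: [ 5 ]
    //     // input1 is expanded with leading 1s -> [ 1, 5 ], which then
    //     // broadcasts element-wise against [ 4, 5 ].
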
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFloorSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;

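// Sketch of how a caller might unpack a DequantizeResult (names hypothetical):
//
//     DequantizeResult result = DequantizeIfRequired<HalPolicy>(1, operation, model, data);
//     const DequantizeStatus status = std::get<3>(result);
//     if (status == DequantizeStatus::SUCCESS)
//     {
//         const float* dequantizedData  = std::get<0>(result).get();
//         size_t numBytes               = std::get<1>(result);
//         const armnn::TensorInfo& info = std::get<2>(result);
//     }
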
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const HalOperation& operation,
                                      const HalModel& model,
                                      const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant<HalPolicy>(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non-const tensor, which indicates they might be the output of a DEQUANTIZE op.
    // Iterate over the operations and find the preceding one, which should be DEQUANTIZE.
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
        ARMNN_ASSERT(operand);

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        ARMNN_ASSERT(dstPtr);
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            // Each QSYMM8 value occupies one byte, so i indexes both buffers
            dstPtr[i] = quantizedBuffer[i] * quantizationScale;
        }
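
        // Dequantization arithmetic, with hypothetical numbers: a QSYMM8 value
        // of 64 stored with scale 0.05f dequantizes to 64 * 0.05f = 3.2f;
        // symmetric quantization has no zero-point offset, so
        // real = quantized * scale.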

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // Return an invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    const armnn::TensorInfo& weightsInfo = GetTensorInfoForOperand(*weightsOperand);
    bool constantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    armnn::Optional<armnn::ConstTensor> optionalWeights = armnn::EmptyOptional();
    if (!constantWeights)
    {
        weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
        if (!weightsInput.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else
    {
        ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
        if (!weightsPin.IsValid())
        {
            return Fail("%s: Operation has invalid weights", __func__);
        }
        optionalWeights = armnn::Optional<armnn::ConstTensor>(weightsPin.GetConstTensor());
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }
    armnn::TensorInfo biasInfo = GetTensorInfoForOperand(*biasOperand);
    bool constantBias = IsOperandConstant<HalPolicy>(*biasOperand);

    armnn::Optional<armnn::ConstTensor> optionalBias = armnn::EmptyOptional();
    if (!constantBias)
    {
        biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data);
        if (!biasInput.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else
    {
        ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
        if (!biasPin.IsValid())
        {
            return Fail("%s: Operation has invalid bias", __func__);
        }
        optionalBias = armnn::Optional<armnn::ConstTensor>(biasPin.GetConstTensor());
    }

3097 if ((constantWeights && !constantBias) || (!constantWeights && constantBias))
3098 {
3099 return Fail("%s: Non-compatible weights and bias", __func__);
3100 }

    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }
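
    // Flattening example (hypothetical shapes): an input of [ 2, 3, 4 ]
    // (24 elements) with weights of [ 5, 12 ] is collapsed to the 2-D shape
    // [ numElements / weightsInfo.GetShape()[1], weightsInfo.GetShape()[1] ],
    // i.e. [ 24 / 12, 12 ] = [ 2, 12 ], which is what the FC layer consumes.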

    // Ensure that the bias quantization scale is within 1% of inputScale * weightsScale
    // (small floating-point differences can exist)
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

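    // Quantized bias scale rule, with hypothetical numbers: for a QAsymmU8 input
    // with scale 0.5f and QAsymmU8 weights with scale 0.25f, the Int32 bias is
    // expected to carry scale 0.5f * 0.25f = 0.125f; values within roughly 1%
    // are snapped to the exact product by the sanitize helper above.
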
    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = constantWeights;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc,
                                               optionalWeights,
                                               optionalBias);

    if (inputInfo.GetNumDimensions() > 2U)
    {
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        ARMNN_ASSERT(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect the weights and bias inputs when they are non-constant
    if (!desc.m_ConstantWeights)
    {
        weightsInput.Connect(startLayer->GetInputSlot(1));
        biasInput.Connect(startLayer->GetInputSlot(2));
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsL2NormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization window,
    // rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

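    // For example (values hypothetical): an AndroidNN radius of 2 becomes an
    // ArmNN window of 1 + (2 * 2) = 5, i.e. the centre element plus two
    // neighbours on each side along the channel axis.
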
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

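    // Axis normalization example (hypothetical values): with rank = 4, the axis
    // list { -1, 3, 1 } maps through (i + rank) % rank to { 3, 3, 1 }, and the
    // set collapses the duplicate to { 1, 3 }.
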
    // Get the "keep dims" flag
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMultiplicationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensors,
    // the scale and zeroPoint must be the same as input0.
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
    // pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }

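    // Concrete illustration (hypothetical quantization): for a QAsymmU8 input
    // with scale 0.25f and zeroPoint 128, the pad value is set to 128, which
    // dequantizes to (128 - 128) * 0.25f = 0.0f, i.e. a logical zero in the
    // quantized domain.
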
    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %zu dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN-provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

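    // Shape resolution example (hypothetical values): for an input of shape
    // [ 2, 3, 4 ] (24 elements) and targetDimensions { 4, -1 }, the -1 is
    // resolved to 24 / 4 = 6, so requestedShape becomes [ 4, 6 ].
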
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSubtractionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence,
                    dimensionSequence + rank);
    }
    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

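    // Squeeze example (hypothetical shape): for an input of [ 1, 3, 1, 2 ] with
    // no axis operand, every dimension is a candidate, so both size-1 dimensions
    // are dropped and outputDims becomes { 3, 2 }; with axis = { 0 }, only
    // dimension 0 is a candidate and the result is { 3, 1, 2 }.
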
    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               reshapeDesc);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
    const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The lengths of the beginOperand, endOperand and stridesOperand must equal the rank of the input
    auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

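    // Slice arithmetic example (hypothetical values): on an input of shape
    // [ 4, 4 ] with begin = { 1, 0 }, end = { 3, 4 }, stride = { 1, 2 } and no
    // masks set, dimension 0 keeps indices 1..2 and dimension 1 keeps indices
    // 0 and 2, so the output shape is [ 2, 2 ].
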
3911 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003912 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3913 {
3914 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3915 IsStridedSliceSupported,
3916 data.m_Backends,
3917 isSupported,
3918 inputInfo,
3919 outputInfo,
3920 descriptor);
3921 };
3922
3923 if(IsDynamicTensor(outputInfo))
3924 {
3925 isSupported = AreDynamicTensorsSupported();
3926 }
3927 else
3928 {
3929 validateFunc(outputInfo, isSupported);
3930 }
3931
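
    // For a dynamic output the shape is only known at execution time, so full validation is
    // deferred: the validateFunc passed to SetupAndTrackLayerOutputSlot below is invoked again
    // once the output shape has been inferred.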
    if (!isSupported)
    {
        return false;
    }

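    // When shrink_axis_mask is set for axis i, the output drops that dimension entirely, so the
    // slice must select exactly one element along that axis; the checks below enforce this.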
    // Check whether the slice can fit into the inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1 then throw an error as the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: The permutation tensor is an optional input to TRANSPOSE, therefore we do not want to
    // generate a failure if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
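        // No permutation was supplied: NNAPI specifies the default as reversing the dimension
        // order, i.e. perm = [rank - 1, ..., 1, 0] (for rank 4 this is [3, 2, 1, 0]).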
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

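    // The mapping follows the NNAPI/TensorFlow transpose convention: output dimension i is taken
    // from input dimension perm[i].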
    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

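    // BATCH_TO_SPACE_ND moves data from the batch dimension back into the spatial dimensions:
    // the output batch is inputBatch / (block[0] * block[1]) and each spatial dimension is
    // multiplied by its block size. For example, an NHWC input of shape [4, 2, 2, 1] with
    // block = [2, 2] produces an output of shape [1, 4, 4, 1].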
    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    // Set crops to {0, 0}, {0, 0} as the Android NN API's BATCH_TO_SPACE_ND does not expose crops
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand   = GetInputOperand<HalPolicy>(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }
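    // The paddings tensor has shape [spatialDim, 2] and is read here as a flat buffer, so each
    // consecutive pair of values is the (before, after) padding for one spatial dimension.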
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

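    // SPACE_TO_BATCH_ND is the inverse of BATCH_TO_SPACE_ND: it divides the (padded) spatial
    // dimensions by the block sizes and multiplies the batch accordingly. For example, an NHWC
    // input of shape [1, 4, 4, 1] with blockShape = [2, 2] and zero padding produces an output
    // of shape [4, 2, 2, 1].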
    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver