//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
        , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
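
// Illustrative usage sketch (the index check mirrors GetInputOperand() further down in this
// file): because Fail() always returns false, a converter can log the reason and bail out
// in a single statement:
//
//     if (inputIndex >= operation.inputs.size())
//     {
//         return Fail("%s: invalid input index: %i", __func__, inputIndex);
//     }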

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
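
// Illustrative usage sketch (hypothetical tensor infos and descriptor; mirrors the
// IsReshapeSupported query made by BroadcastTensor() below). The macro expands in place,
// writes the query result into `supported`, and logs the backend's reasonIfUnsupported
// when the check fails:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);
//     if (!isSupported)
//     {
//         return false;
//     }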

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
270
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100271template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000272armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
273 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100274 armnn::TensorInfo reshapeInfo)
275{
276 armnn::ReshapeDescriptor reshapeDescriptor;
277 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
278
279 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100280 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100281
282 // Attach the input layer to the reshape layer
283 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
284 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
285
286 return *reshapeLayer;
287}
288
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000289bool BroadcastTensor(LayerInputHandle& input0,
290 LayerInputHandle& input1,
291 armnn::IConnectableLayer* startLayer,
292 ConversionData& data)
arovir01b0717b52018-09-05 17:03:25 +0100293{
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100294 ARMNN_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100295
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100296 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
297 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
298
299 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
300 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
301
302 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100303 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100304 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
305 input0.Connect(startLayer->GetInputSlot(0));
306 input1.Connect(startLayer->GetInputSlot(1));
307
Sadik Armagan64b19b52019-08-19 09:49:58 +0100308 return true;
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100309 }
310
311 // Since the number of dimensions do not match then we need to add degenerate dimensions
312 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
313
314 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
Matthew Sloyan9b088d92020-09-14 15:12:55 +0100315 unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
316 armnn::numeric_cast<int>(inputDimensions1));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100317
318 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
319 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
320 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
321
322 const armnn::TensorShape& smallShape = smallInfo.GetShape();
323 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
324 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
325 {
326 reshapedDimensions[i] = smallShape[i - sizeDifference];
327 }
328
329 armnn::TensorInfo reshapedInfo = smallInfo;
Matthew Sloyan9b088d92020-09-14 15:12:55 +0100330 reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100331 reshapedDimensions.data() });
Sadik Armagan64b19b52019-08-19 09:49:58 +0100332
333 // RehsapeDescriptor that is ignored in the IsReshapeSupported function
334 armnn::ReshapeDescriptor reshapeDescriptor;
335
336 bool isSupported = false;
337 FORWARD_LAYER_SUPPORT_FUNC(__func__,
338 IsReshapeSupported,
339 data.m_Backends,
340 isSupported,
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +0000341 smallInfo,
Sadik Armagan64b19b52019-08-19 09:49:58 +0100342 reshapedInfo,
343 reshapeDescriptor);
344 if (!isSupported)
345 {
346 return false;
347 }
348
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100349 ARMNN_ASSERT(data.m_Network != nullptr);
Sadik Armagan64b19b52019-08-19 09:49:58 +0100350 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100351
352 if (input0IsSmaller)
353 {
354 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
355 //
356 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100357 // | |
358 // Reshape |
359 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100360 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100361
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100362 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
363 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100364 }
365 else
366 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100367 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
368 //
369 // Input0 Input1
370 // | |
371 // | Reshape
372 // \ /
373 // StartLayer
374
arovir01b0717b52018-09-05 17:03:25 +0100375 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100376 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100377 }
Sadik Armagan64b19b52019-08-19 09:49:58 +0100378
379 return true;
arovir01b0717b52018-09-05 17:03:25 +0100380}
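
// Worked example (illustrative): broadcasting input1 of shape [2, 3] against input0 of
// shape [1, 2, 2, 3] gives maxInputDimensions = 4 and sizeDifference = 2; input1 is the
// "smaller" tensor and is reshaped to [1, 1, 2, 3] before both inputs are connected to
// startLayer.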

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
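
// Worked example (illustrative, following the SAME-padding rule implemented by the NNAPI
// helper calculateExplicitPadding): with input = 5, kernel = 3, stride = 2 the output size
// is ceil(5 / 2) = 3, the padded extent is (3 - 1) * 2 + 3 = 7, so the total padding of
// 7 - 5 = 2 is split into outPadHead = 1 and outPadTail = 1; a VALID scheme sets both to 0.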

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
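
// Worked example (illustrative): with inputScale = 0.5 and per-axis weight scales
// { 0.25, 0.5 }, the bias scales become { 0.125, 0.25 } via scale[i] = input_scale *
// weight_scale[i]. In the per-tensor branch, a bias scale within 1% of
// weightScale * inputScale is snapped to the exact product; larger mismatches are left
// untouched for the backend's own validation to reject.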

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
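
// Illustrative note (assuming ArmNN's Transpose convention, where
// outputShape[i] = inputShape[mappings[i]]): RotateTensorLeft ({ 1U, 2U, 0U }) maps a
// [2, 3, 4] input to a [3, 4, 2] output, and SwapDim1And2 maps [N, C, H, W] to
// [N, H, C, W].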
521
arovir01b0717b52018-09-05 17:03:25 +0100522bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
523 const armnn::TensorShape & outputShape,
524 uint32_t concatDim)
525{
526 // Validate the output shape is correct given the input shapes (which have just been validated)
527 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
528 if (outputShape.GetNumDimensions() != numDimensions)
529 {
530 return Fail("%s: Output shape has wrong number of dimensions", __func__);
531 }
532
533 unsigned int outputSizeAlongConcatenatedDimension = 0;
534 for (unsigned int i = 0; i < inputShapes.size(); i++)
535 {
536 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
537 }
538
539 for (unsigned int i = 0; i < numDimensions; ++i)
540 {
541 if (i == concatDim)
542 {
543 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
544 {
545 return Fail(
546 "%s: Invalid output shape for dimension %d (%d != %d)",
547 __func__,
548 i,
549 outputShape[i],
550 outputSizeAlongConcatenatedDimension);
551 }
552 }
553 else
554 {
555 if (outputShape[i] != inputShapes[0][i])
556 {
557 return Fail("%s: Invalid output shape", __func__);
558 }
559 }
560 }
561
562 return true;
563}
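
// Worked example (illustrative): concatenating inputs of shape [1, 2, 4] and [1, 3, 4]
// along concatDim = 1 requires an output shape of [1, 5, 4]; every dimension other than
// concatDim must match the inputs exactly.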

bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // Check that the permutation is supported as a Transpose layer on the given backends
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}


bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
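
// Worked example (illustrative): for a 4-D concatenation along dimension 2, the inputs are
// transposed with SwapDim1And2, concatenated along dimension 1, and the result is
// transposed back with the second element of permutationPair (SwapDim1And2 is its own
// inverse); the 3-D case pairs RotateTensorLeft with RotateTensorRight as its inverse.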

} // anonymous namespace

namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires it.
//// @return The end layer of the sequence of layers built for the given AndroidNN
////         activation function or nullptr if an error occurred (e.g. unsupported activation).
////         Note that the end layer matches the input layer if no activation is required
////         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
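
// Illustrative usage sketch (hypothetical weightsOperand; follows the pattern used by the
// per-operation converters that pin constant weights):
//
//     ConstTensorPin weightsPin = ConvertOperandToConstTensorPin<HalPolicy>(*weightsOperand, model, data);
//     if (!weightsPin.IsValid())
//     {
//         return Fail("%s: operation has invalid weights", __func__);
//     }
//     const armnn::ConstTensor& weights = weightsPin.GetConstTensor();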

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
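
// Illustrative usage sketch (hypothetical input index 3; mirrors how the convolution
// converters read their stride and padding scalars):
//
//     int32_t strideX = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 3, HalPolicy::OperandType::INT32, strideX, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }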
927
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100928template<typename HalPolicy,
929 typename HalOperation = typename HalPolicy::Operation,
930 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100931bool GetInputInt32(const HalOperation& operation,
932 uint32_t inputIndex,
933 int32_t& outValue,
934 const HalModel& model,
935 const ConversionData& data)
936{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100937 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100938}
939
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100940template<typename HalPolicy,
941 typename HalOperation = typename HalPolicy::Operation,
942 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100943bool GetInputFloat32(const HalOperation& operation,
944 uint32_t inputIndex,
945 float& outValue,
946 const HalModel& model,
947 const ConversionData& data)
948{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100949 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100950}
951
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100952template<typename HalPolicy,
953 typename HalOperation = typename HalPolicy::Operation,
954 typename HalOperandType = typename HalPolicy::OperandType,
955 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100956bool GetInputActivationFunctionImpl(const HalOperation& operation,
957 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100958 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100959 ActivationFn& outActivationFunction,
960 const HalModel& model,
961 const ConversionData& data)
962{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100963 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100964 {
965 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
966 __func__,
967 toString(type).c_str(),
Sadik Armagan188675f2021-02-12 17:16:42 +0000968 toString(HalOperandType::INT32).c_str(),
969 toString(HalOperandType::TENSOR_INT32).c_str());
arovir01b0717b52018-09-05 17:03:25 +0100970 }
971
972 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100973 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100974 {
975 return Fail("%s: failed to get activation input value", __func__);
976 }
977 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
978 return true;
979}
980
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981template<typename HalPolicy,
982 typename HalOperation = typename HalPolicy::Operation,
983 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100984bool GetInputActivationFunction(const HalOperation& operation,
985 uint32_t inputIndex,
986 ActivationFn& outActivationFunction,
987 const HalModel& model,
988 const ConversionData& data)
989{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100990 return GetInputActivationFunctionImpl<HalPolicy>(operation,
991 inputIndex,
992 HalPolicy::OperandType::INT32,
993 outActivationFunction,
994 model,
995 data);
arovir01b0717b52018-09-05 17:03:25 +0100996}
997
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100998template<typename HalPolicy,
999 typename HalOperation = typename HalPolicy::Operation,
1000 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001001bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
1002 uint32_t inputIndex,
1003 ActivationFn& outActivationFunction,
1004 const HalModel& model,
1005 const ConversionData& data)
1006{
1007 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001008 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1009 inputIndex,
1010 HalPolicy::OperandType::INT32,
1011 outActivationFunction,
1012 model,
1013 data);
arovir01b0717b52018-09-05 17:03:25 +01001014}
1015
1016
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001017template<typename HalPolicy,
1018 typename HalOperation = typename HalPolicy::Operation,
1019 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001020bool GetOptionalInputActivation(const HalOperation& operation,
1021 uint32_t inputIndex,
1022 ActivationFn& activationFunction,
1023 const HalModel& model,
1024 const ConversionData& data)
1025{
1026 if (operation.inputs.size() <= inputIndex)
1027 {
1028 activationFunction = ActivationFn::kActivationNone;
1029 }
1030 else
1031 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001032 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001033 {
1034 return Fail("%s: Operation has invalid inputs", __func__);
1035 }
1036 }
1037 return true;
1038}
1039
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001040template<typename HalPolicy,
1041 typename ConvolutionDescriptor,
1042 typename HalOperation = typename HalPolicy::Operation,
1043 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001044bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1045 uint32_t dilationXIndex,
1046 ConvolutionDescriptor& descriptor,
1047 const HalModel& model,
1048 const ConversionData& data)
1049{
1050 bool success = true;
1051 if (operation.inputs.size() >= dilationXIndex + 2)
1052 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001053 success &= GetInputScalar<HalPolicy>(operation,
1054 dilationXIndex,
1055 HalPolicy::OperandType::INT32,
1056 descriptor.m_DilationX,
1057 model,
1058 data);
1059 success &= GetInputScalar<HalPolicy>(operation,
1060 dilationXIndex + 1,
1061 HalPolicy::OperandType::INT32,
1062 descriptor.m_DilationY,
1063 model,
1064 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001065 }
1066
1067 return success;
1068}
1069
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001070template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001071 typename HalOperation = typename HalPolicy::Operation,
1072 typename HalModel = typename HalPolicy::Model>
1073bool GetOptionalBool(const HalOperation& operation,
1074 uint32_t inputIndex,
1075 const HalModel& model,
1076 const ConversionData& data)
1077{
1078 using HalOperand = typename HalPolicy::Operand;
1079
1080 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1081 if (!operand)
1082 {
1083 return false;
1084 }
1085
1086 if (!IsBool(*operand))
1087 {
1088 return false;
1089 }
1090
1091 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1092 if (!valueAddress)
1093 {
1094 return false;
1095 }
1096
1097 if (*(static_cast<const bool*>(valueAddress)))
1098 {
1099 return true;
1100 }
1101 else
1102 {
1103 return false;
1104 }
1105}
1106
1107template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001108 typename HalOperand = typename HalPolicy::Operand,
1109 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001110bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001111 std::vector<int32_t>& outValues,
1112 const HalModel& model,
1113 const ConversionData& data)
1114{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001115 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001116 {
1117 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1118 }
1119
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001120 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001121 if (!startAddress)
1122 {
1123 return Fail("%s: failed to get operand address", __func__, operand.type);
1124 }
1125
1126 // Check number of bytes is sensible
1127 const uint32_t numBytes = operand.location.length;
1128 if (numBytes % sizeof(int32_t) != 0)
1129 {
1130 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1131 __func__, numBytes, sizeof(int32_t));
1132 }
1133
1134 outValues.resize(numBytes / sizeof(int32_t));
1135 memcpy(outValues.data(), startAddress, numBytes);
1136 return true;
1137}
1138
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001139template<typename HalPolicy,
1140 typename HalOperation = typename HalPolicy::Operation,
1141 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001142bool GetInputPaddingScheme(const HalOperation& operation,
1143 uint32_t inputIndex,
1144 PaddingScheme& outPaddingScheme,
1145 const HalModel& model,
1146 const ConversionData& data)
1147{
1148 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001149 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001150 {
1151 return Fail("%s: failed to get padding scheme input value", __func__);
1152 }
1153
1154 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1155 return true;
1156}
1157
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001158template<typename HalPolicy,
1159 typename HalOperation = typename HalPolicy::Operation,
1160 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001161LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
1162 uint32_t inputIndex,
1163 const HalModel& model,
1164 ConversionData& data)
1165{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001166 using HalOperand = typename HalPolicy::Operand;
Sadik Armagan44bcc022019-06-18 17:21:36 +01001167 using HalOperandType = typename HalPolicy::OperandType;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001168 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1169
1170 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +01001171 if (!operand)
1172 {
1173 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1174 return LayerInputHandle();
1175 }
1176
1177 if (!IsOperandTypeSupportedForTensors(operand->type))
1178 {
1179 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1180 return LayerInputHandle();
1181 }
1182
Sadik Armagan44bcc022019-06-18 17:21:36 +01001183 try
arovir01b0717b52018-09-05 17:03:25 +01001184 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001185 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001186 if (IsDynamicTensor(operandTensorInfo))
1187 {
1188 Fail("%s: dynamic input tensors are not supported", __func__);
1189 return LayerInputHandle();
1190 }
arovir01b0717b52018-09-05 17:03:25 +01001191
Sadik Armagan44bcc022019-06-18 17:21:36 +01001192 switch (operand->lifetime)
arovir01b0717b52018-09-05 17:03:25 +01001193 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001194 case HalOperandLifeTime::MODEL_INPUT:
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001195 {
1196 // NOTE: We must check whether we can support the input tensor on at least one
1197 // of the provided backends; otherwise we cannot convert the operation
1198 bool isInputSupported = false;
1199 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1200 IsInputSupported,
1201 data.m_Backends,
1202 isInputSupported,
1203 operandTensorInfo);
1204
1205 if (!isInputSupported)
1206 {
1207 Fail("%s: unsupported input tensor", __func__);
1208 return LayerInputHandle();
1209 }
1210
James Ward4e22f602020-10-20 15:50:33 +01001211 [[clang::fallthrough]]; // intentional fallthrough
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001212 }
1213 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001214 case HalOperandLifeTime::MODEL_OUTPUT:
arovir01b0717b52018-09-05 17:03:25 +01001215 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001216 // The tensor is either an operand internal to the model, or a model input.
1217 // It can be associated with an ArmNN output slot for an existing layer.
1218
1219 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1220 const uint32_t operandIndex = operation.inputs[inputIndex];
1221 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001222 }
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001223 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001224 case HalOperandLifeTime::CONSTANT_REFERENCE:
1225 {
1226 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1227 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1228 if (tensorPin.IsValid())
arovir01b0717b52018-09-05 17:03:25 +01001229 {
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001230 bool isSupported = false;
1231 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1232 IsConstantSupported,
1233 data.m_Backends,
1234 isSupported,
1235 tensorPin.GetConstTensor().GetInfo());
Mike Kelly28e3d9f2019-08-07 14:55:04 +01001236 if (!isSupported)
Sadik Armagan44bcc022019-06-18 17:21:36 +01001237 {
1238 return LayerInputHandle();
1239 }
1240
1241 armnn::IConnectableLayer* constantLayer =
1242 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1243 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1244 outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
1245
1246 return LayerInputHandle(true, &outputSlot, operandTensorInfo);
1247 }
1248 else
1249 {
1250 Fail("%s: invalid operand tensor", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001251 return LayerInputHandle();
1252 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001253 break;
arovir01b0717b52018-09-05 17:03:25 +01001254 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001255 default:
arovir01b0717b52018-09-05 17:03:25 +01001256 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001257 // Unsupported lifetime for an input tensor
1258 Fail("%s: unsupported lifetime for input tensor: %s",
1259 __func__, toString(operand->lifetime).c_str());
arovir01b0717b52018-09-05 17:03:25 +01001260 return LayerInputHandle();
1261 }
arovir01b0717b52018-09-05 17:03:25 +01001262 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001263 }
1264 catch (UnsupportedOperand<HalOperandType>& e)
1265 {
1266 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1267 return LayerInputHandle();
arovir01b0717b52018-09-05 17:03:25 +01001268 }
1269}
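
// Illustrative usage sketch (hypothetical converter body; this is the common entry point
// used by the per-operation ConvertXxx functions in this driver):
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();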

#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const ::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %u", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensor has already been inferred by one of the
            // previous layers; if it has not been, we cannot support it. Note that the
            // index must be strictly less than the vector size for the element access
            // below to stay in bounds.
            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

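        // Note on the check above: a dynamic input is only convertible when a previously
        // converted layer has already inferred its shape, in which case its output slot
        // carries the inferred TensorInfo that is picked up here; an input with no such
        // producer ("type 2" dynamic) is rejected.
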
        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif

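// Illustrative usage sketch (not part of the driver API): a typical operation converter
// pulls each input through ConvertToLayerInputHandle and wires it into a freshly added
// ArmNN layer. The layer choice below (AddFloorLayer) is an arbitrary stand-in.
//
//     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//     armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
//     input.Connect(layer->GetInputSlot(0));
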
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}

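// Illustrative sketch (not part of the driver API): converters pass a validateFunc into
// SetupAndTrackLayerOutputSlot so that support checks on dynamic output tensors can be
// deferred until the shape has been inferred. The support query named below is a
// placeholder for whichever Is<Layer>Supported check the converter needs.
//
//     bool isSupported = false;
//     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
//     {
//         FORWARD_LAYER_SUPPORT_FUNC(__func__, IsFloorSupported, data.m_Backends,
//                                    isSupported, inputInfo, outputInfo);
//     };
//     if (IsDynamicTensor(outputInfo))
//     {
//         isSupported = AreDynamicTensorsSupported(); // re-validated via validateFunc later
//     }
//     else
//     {
//         validateFunc(outputInfo, isSupported);
//     }
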
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

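// Illustrative note: the optional data layout operand is a single boolean; true selects
// NCHW, while false, a missing operand, a non-bool operand, or an unreadable value all
// fall back to NHWC. For example, input 10 of a pooling operation set to true yields:
//
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
//     // desc.m_DataLayout is now armnn::DataLayout::NCHW
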
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%u, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // Add a front/back padding pair for each dimension of the input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

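// Worked example (illustrative): for a rank-2 input, a paddings operand of shape [2, 2]
// holding { {1, 2}, {3, 4} } pads dimension 0 with 1 value before / 2 after, and
// dimension 1 with 3 before / 4 after; padDescriptor.m_PadList becomes
// { {1, 2}, {3, 4} } and an input of shape [H, W] produces [H + 3, W + 7].
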
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

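// Worked example (illustrative, assuming the Android NN SAME/VALID padding rules that
// CalcPadding implements): a 2x2 pool with stride 2 over a 225-element-wide input needs
// ceil(225 / 2) = 113 output columns, so SAME padding adds
// (113 - 1) * 2 + 2 - 225 = 1 element, split as m_PadLeft = 0 and m_PadRight = 1;
// the VALID scheme adds no padding at all.
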
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

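// Illustrative note (assumed behaviour of the BroadcastTensor helper): when the two
// inputs differ in rank, the lower-rank input is reshaped, conceptually by prepending
// 1-sized dimensions, before being connected, so that e.g. adding a [4] bias to a
// [2, 4] tensor presents both operands to the addition layer at rank 2.
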
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
                      const HalModel& model,
                      ConversionData& data,
                      armnn::ArgMinMaxFunction argMinMaxFunction)
{
    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (axis < -rank || axis >= rank)
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis = axis;

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);

    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

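// Worked example (illustrative): for a rank-4 input, axis = -1 passes the range check
// because -1 >= -4, and per the comment in the range check it addresses the same
// dimension as axis = 3 (-1 + 4 == 3). The value is stored unmodified in
// descriptor.m_Axis and resolved downstream.
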
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();
    const bool isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

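    // Worked example (illustrative): for rank-3 inputs, concatDim = -1 normalises to
    // -1 + 3 = 2, which then passes the bounds check below as a valid axis in [0, 3).
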
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if a permutation is required and get the pair of permutations required for the concatenation.
    // A permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

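    // Illustrative note: ArmNN concatenation is only performed along dimension 0, 1 or 3
    // of a 4-D tensor (0 or 2 for 3-D), as noted where the descriptor is created below.
    // For an axis such as dimension 2 of a 4-D tensor, the permutation pair describes a
    // swizzle applied to the inputs so the concat axis lands on a supported dimension,
    // and a matching deswizzle (permutationPair.second) that restores the output layout.
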
    // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
                                   outputInfo, concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Connect inputs to the layer
    const unsigned int numInputSlots = layer->GetNumInputSlots();
    ARMNN_ASSERT(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[i].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        if (!transposeOutputShape())
        {
            return false;
        }
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            if (!transposeOutputShape())
            {
                return false;
            }
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                       0,
                                                       *layer,
                                                       model,
                                                       data,
                                                       nullptr,
                                                       validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

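// Worked example (illustrative): concatenating inputs of shape [1, 2, 3] and [1, 2, 5]
// along concatDim = 2 must produce [1, 2, 8]: every non-concat dimension matches across
// the inputs and the concat dimension is the sum of the per-input sizes. This is the
// property ValidateConcatOutputShape guards above.
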
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

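// Illustrative note: the implicit-padding branch above reads the kernel size straight
// out of the NNAPI weights tensor, which CONV_2D defines as [depth_out, filter_height,
// filter_width, depth_in]; hence kernelY = shape[1] and kernelX = shape[2], while the
// NHWC input contributes inputY = shape[1] and inputX = shape[2].
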
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::DepthToSpaceDescriptor descriptor;

    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
    {
        return Fail("%s: Could not read block size input 1", __func__);
    }
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1 in all dimensions", __func__);
    }

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

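// Worked example (illustrative): DEPTH_TO_SPACE with block size 2 on an NHWC input of
// shape [1, 2, 2, 4] rearranges each group of 2 * 2 = 4 channels into a 2x2 spatial
// block, giving an output of shape [1, 4, 4, 1].
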
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // The layout for weights in depthwise is [ 1, H, W, O ] and it's the same in ArmNN. No need to permute anything.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2668 data, nullptr, validateFunc, activation);
arovir01b0717b52018-09-05 17:03:25 +01002669}
2670
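// Converts ANEURALNETWORKS_DEQUANTIZE to an ArmNN Dequantize layer.
// Only per-tensor quantization is handled: inputs with a quantization dimension other than 0
// (i.e. per-channel quantized tensors) are rejected.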
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

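// Converts ANEURALNETWORKS_DIV to an ArmNN Division layer. Inputs 0 and 1 are the operands and
// input 2 the optional fused activation; differing input shapes are reconciled by
// BroadcastTensor, which may insert a Reshape in front of the lower-rank input.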
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

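// Converts ANEURALNETWORKS_FLOOR to an ArmNN Floor layer (elementwise floor of input 0).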
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFloorSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

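// IsQSymm8 helpers: report whether an operand is a TENSOR_QUANT8_SYMM tensor. The V1_0 overload
// always returns false since that operand type is only available in the HAL 1.2 and 1.3 builds
// guarded below.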
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

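// Result plumbing for the dequantize-constant-weights workaround below. DequantizeStatus records
// whether dequantization succeeded, was not required, or the operand was invalid;
// DequantizeResult bundles the dequantized buffer, its size in bytes and the Float32 TensorInfo
// describing it.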
enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;

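// If the operand at operand_index is not already constant, search the model for a DEQUANTIZE
// operation whose output feeds it. When one is found and its input is a constant QSYMM8 tensor,
// dequantize that tensor to Float32 here so the weights can still be treated as constant.
// Returns NOT_REQUIRED when the operand is constant or no matching DEQUANTIZE producer exists.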
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const HalOperation& operation,
                                      const HalModel& model,
                                      const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant<HalPolicy>(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
    // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
        ARMNN_ASSERT(operand);

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        // QSYMM8 values are signed 8-bit, so read them through an int8_t pointer before scaling
        const int8_t* quantizedBuffer = reinterpret_cast<const int8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        ARMNN_ASSERT(dstPtr);
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            // Dequantize each element into the next position of the output buffer
            *dstPtr++ = quantizedBuffer[i] * quantizationScale;
        }

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

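// Wrapper around DequantizeIfRequired that yields a ConstTensorPin: an invalid pin for a bad
// operand, a pin read straight from the model when no dequantization was needed, or a pin over
// the freshly dequantized Float32 buffer otherwise.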
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

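// Converts ANEURALNETWORKS_FULLY_CONNECTED to an ArmNN FullyConnected layer. Weights (input 1)
// and bias (input 2) may each be constant or runtime tensors, but their constant-ness must match.
// Inputs with more than two dimensions are flattened to [batchSize, inputSize] via an inserted
// Reshape; e.g. a [2, 3, 4] input with 12-element weight rows would be fed in as [2, 12].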
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    const armnn::TensorInfo& weightsInfo = GetTensorInfoForOperand(*weightsOperand);
    bool constantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    armnn::Optional<armnn::ConstTensor> optionalWeights = armnn::EmptyOptional();
    if (!constantWeights)
    {
        weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
        if (!weightsInput.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else
    {
        ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
        if (!weightsPin.IsValid())
        {
            return Fail("%s: Operation has invalid weights", __func__);
        }
        optionalWeights = armnn::Optional<armnn::ConstTensor>(weightsPin.GetConstTensor());
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }
    armnn::TensorInfo biasInfo = GetTensorInfoForOperand(*biasOperand);
    bool constantBias = IsOperandConstant<HalPolicy>(*biasOperand);

    armnn::Optional<armnn::ConstTensor> optionalBias = armnn::EmptyOptional();
    if (!constantBias)
    {
        biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data);
        if (!biasInput.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else
    {
        ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
        if (!biasPin.IsValid())
        {
            return Fail("%s: Operation has invalid bias", __func__);
        }
        optionalBias = armnn::Optional<armnn::ConstTensor>(biasPin.GetConstTensor());
    }

    // Weights and bias must either both be constant or both be runtime inputs
    if (constantWeights != constantBias)
    {
        return Fail("%s: Non-compatible weights and bias", __func__);
    }

    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensure the bias quantization scale stays within 1% of weightsScale * inputScale
    // (small floating point differences can exist)
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = constantWeights;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, optionalWeights, optionalBias);

    if (inputInfo.GetNumDimensions() > 2U)
    {
        // Flatten the input to 2D by inserting a Reshape layer in front of the FullyConnected layer
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        assert(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect the weights and bias inputs when they are not constant
    if (!desc.m_ConstantWeights)
    {
        weightsInput.Connect(startLayer->GetInputSlot(1));
        biasInput.Connect(startLayer->GetInputSlot(2));
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

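// Converts ANEURALNETWORKS_L2_NORMALIZATION to an ArmNN L2Normalization layer. Only the
// single-input form over a rank-4 NHWC tensor is supported; the optional axis input is not.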
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsL2NormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

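// Converts ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION to an ArmNN Normalization layer
// configured for across-channel, local-brightness normalization. Note the radius-to-window
// conversion below: AndroidNN supplies a radius r while ArmNN expects the full window 2r + 1.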
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

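// Converts ANEURALNETWORKS_LOGISTIC via the generic activation path with a Sigmoid descriptor,
// i.e. f(x) = 1 / (1 + exp(-x)).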
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

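// Converts ANEURALNETWORKS_MEAN to an ArmNN Mean layer. Input 1 is the axis tensor and input 2
// the keep_dims flag. Negative axes are wrapped into range and duplicates discarded; e.g. for a
// rank-4 input, axes {-1, 3} collapse to the single axis 3.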
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

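// Converts ANEURALNETWORKS_MUL to an ArmNN Multiplication layer, with the optional fused
// activation at input 2 and shape broadcasting handled by BroadcastTensor.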
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMultiplicationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

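// Converts ANEURALNETWORKS_PAD to an ArmNN Pad layer. For quantized asymmetric inputs the pad
// value is set to the zero point, so the padded region dequantizes to logical zero.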
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPad(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensors,
    // the scale and zeroPoint must be the same as input0.
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
    // value must be "logical zero", so we set it equal to the QuantizationOffset; it then dequantizes to
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

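// Converts ANEURALNETWORKS_RESHAPE to an ArmNN Reshape layer. Input 1 is a 1-D tensor of target
// dimensions which may contain the special value -1; reshapePrepare() resolves it against the
// input shape, e.g. reshaping a [2, 6] input with targets {3, -1} yields [3, 4].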
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

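// Converts ANEURALNETWORKS_SUB to an ArmNN Subtraction layer, with the optional fused activation
// at input 2 and shape broadcasting handled by BroadcastTensor.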
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSubtractionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

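// Converts ANEURALNETWORKS_SQUEEZE by building a Reshape that drops size-1 dimensions. The
// optional axis tensor (input 1) restricts which dimensions may be squeezed; without it every
// size-1 dimension is removed, e.g. [1, 2, 1, 3] -> [2, 3].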
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence,
                    dimensionSequence + rank);
    }
    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               reshapeDesc);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

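// Converts ANEURALNETWORKS_STRIDED_SLICE to an ArmNN StridedSlice layer. Inputs 1-3 are the
// begin, end and strides tensors, each of length rank(input); inputs 4-6 are the begin, end and
// shrink-axis masks. Zero strides are rejected, as are shrink-axis slices wider than one element.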
3822template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003823 typename HalOperation = typename HalPolicy::Operation,
3824 typename HalModel = typename HalPolicy::Model>
3825bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003826{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003827 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003828
3829 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3830 if (!input.IsValid())
3831 {
3832 return Fail("%s: Operation has invalid inputs", __func__);
3833 }
3834
3835 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3836 unsigned int rank = inputInfo.GetNumDimensions();
3837 if (rank > 4)
3838 {
3839 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3840 }
3841
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003842 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003843 if (!output)
3844 {
3845 return Fail("%s: Could not read output 0", __func__);
3846 }
3847
3848 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003849
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003850 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3851 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3852 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003853
3854 std::vector<int32_t> beginValues;
3855 std::vector<int32_t> endValues;
3856 std::vector<int32_t> stridesValues;
3857
3858 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003859 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003860 {
3861 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3862 {
3863 return false;
3864 }
3865
3866 if (operandValues.size() != rank)
3867 {
3868 return false;
3869 }
3870
3871 return true;
3872 };
3873
3874 if (!ValidateInputOperands(*beginOperand, beginValues)
3875 || !ValidateInputOperands(*endOperand, endValues)
3876 || !ValidateInputOperands(*stridesOperand, stridesValues))
3877 {
3878 return Fail("%s: Operation has invalid input operand", __func__);
3879 }
3880
3881 // Stride cannot have value '0'
3882 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3883 {
3884 return Fail("%s: Stride must be non-zero value.", __func__);
3885 }
3886
3887 armnn::StridedSliceDescriptor descriptor;
3888 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3889 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3890 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3891 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3892
3893 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3894 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3895 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3896 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3897 {
3898 return Fail("%s: Operation has invalid inputs", __func__);
3899 }
3900
3901 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003902 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3903 {
3904 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3905 IsStridedSliceSupported,
3906 data.m_Backends,
3907 isSupported,
3908 inputInfo,
3909 outputInfo,
3910 descriptor);
3911 };
3912
3913 if(IsDynamicTensor(outputInfo))
3914 {
3915 isSupported = AreDynamicTensorsSupported();
3916 }
3917 else
3918 {
3919 validateFunc(outputInfo, isSupported);
3920 }
3921
Mike Kelly46272802019-08-14 17:00:48 +01003922 if (!isSupported)
3923 {
3924 return false;
3925 }
3926
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003927    // Check if the slice can fit in an inferred output
3928 armnn::TensorShape inputShape = inputInfo.GetShape();
3929 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3930 {
3931 int stride = descriptor.m_Stride[i];
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003932
3933 if (descriptor.m_ShrinkAxisMask & (1 << i))
3934 {
3935            // If the difference between the start point and the end point of the slice on an axis being shrunk
3936            // is greater than 1, fail: the output will not be large enough to hold the slice
3937 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3938 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3939 {
3940 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3941 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003942
3943            if (stride < 0)
3944            {
3945                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
3946 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003947 }
3948 }
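    // Illustrative example (hypothetical values): with m_ShrinkAxisMask bit i set the output loses
    // dimension i, so at most one element may be selected on that axis. begin[i] = 2, end[i] = 3
    // (difference -1) passes the check above, while begin[i] = 0, end[i] = 4 is rejected.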
3949
Mike Kelly46272802019-08-14 17:00:48 +01003950 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3951 assert(layer != nullptr);
3952 input.Connect(layer->GetInputSlot(0));
3953
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003954 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003955}
3956
3957template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003958 typename HalOperation = typename HalPolicy::Operation,
3959 typename HalModel = typename HalPolicy::Model>
3960bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003961{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003962 using HalOperand = typename HalPolicy::Operand;
Kevin May81f27fd2020-08-20 10:22:53 +01003963 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01003964
3965 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3966 if (!input.IsValid())
3967 {
3968 return Fail("%s: Operation has invalid inputs", __func__);
3969 }
3970
3971 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3972 unsigned int rank = inputInfo.GetNumDimensions();
3973 if (rank > 4)
3974 {
3975        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3976 }
3977
3978    // NOTE: The permutation tensor (perm) is an optional input to TRANSPOSE, therefore we do not want to
3979    // generate a failure if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003980 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003981
3982 std::vector<int32_t> perm(rank);
Kevin May81f27fd2020-08-20 10:22:53 +01003983 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01003984 {
Mike Kelly46272802019-08-14 17:00:48 +01003985 for (unsigned int i = rank; i > 0; i--)
3986 {
Matthew Sloyan9b088d92020-09-14 15:12:55 +01003987 perm[rank - i] = armnn::numeric_cast<int> (i - 1);
Mike Kelly46272802019-08-14 17:00:48 +01003988 }
3989 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003990 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003991 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003992 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003993 }
3994
3995 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3996
Mike Kelly4a956582020-02-28 10:32:09 +00003997 armnn::TransposeDescriptor transposeDesc;
3998 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
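    // Note (assumed convention): for ArmNN's Transpose layer, m_DimMappings follows the usual perm
    // semantics: output dimension i is taken from input dimension perm[i]. E.g. an input of shape
    // [1, 2, 3, 4] with perm = [3, 2, 1, 0] yields an output of shape [4, 3, 2, 1].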
Mike Kelly46272802019-08-14 17:00:48 +01003999
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00004000 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01004001 if (!output)
4002 {
4003 return Fail("%s: Could not read output 0", __func__);
4004 }
4005
4006 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4007
4008 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004009 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4010 {
4011 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4012 IsTransposeSupported,
4013 data.m_Backends,
4014 isSupported,
4015 inputInfo,
4016 outputInfo,
4017 transposeDesc);
4018 };
4019
4020    if (IsDynamicTensor(outputInfo))
4021 {
4022 isSupported = AreDynamicTensorsSupported();
4023 }
4024 else
4025 {
4026 validateFunc(outputInfo, isSupported);
4027 }
4028
Mike Kelly46272802019-08-14 17:00:48 +01004029 if (!isSupported)
4030 {
4031 return false;
4032 }
4033
Mike Kelly4a956582020-02-28 10:32:09 +00004034 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01004035 assert(layer != nullptr);
4036 input.Connect(layer->GetInputSlot(0));
4037
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004038 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01004039}
4040
4041template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01004042 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01004043 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01004044 typename HalModel = typename HalPolicy::Model>
4045bool ConvertBatchToSpaceNd(const HalOperation& operation,
4046 const HalModel& model,
4047 ConversionData& data)
4048{
Finn Williams23b87b32019-07-30 11:44:05 +01004049
4050 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4051 if (!input.IsValid())
4052 {
4053 return Fail("%s: Operation has invalid inputs", __func__);
4054 }
4055
4056 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4057 if (!output)
4058 {
4059 return Fail("%s: Could not read output 0", __func__);
4060 }
4061
4062 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01004063
4064 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4065 if (!blockOperand)
4066 {
4067 return Fail("%s: Could not read input 1", __func__);
4068 }
4069
4070 // Convert the block operand to int32
4071 std::vector<int32_t> block;
4072 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
4073 {
4074 return Fail("%s: Input 1 has invalid values", __func__);
4075 }
4076
4077 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4078
4079 unsigned int rank = inputInfo.GetNumDimensions();
4080 if (rank != 4)
4081 {
4082        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
4083 }
4084
4085 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4086 {
4087 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4088 " greater than or equal to 1", __func__);
4089 }
4090
4091 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4092 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4093 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4094
Kevin May42477c12020-03-26 13:34:14 +00004095 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004096 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004097 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004098 }
4099    // Set crops to {{0, 0}, {0, 0}}, as crops are not exposed by the Android NN API
4100 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
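    // Worked example (hypothetical shapes): an NHWC input of shape [4, 2, 2, 1] with block = [2, 2]
    // yields an output of shape [1, 4, 4, 1]; batch is divided by prod(block) = 4 and each spatial
    // dimension is multiplied by its block size.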
4101
4102 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004103 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4104 {
4105 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4106 IsBatchToSpaceNdSupported,
4107 data.m_Backends,
4108 isSupported,
4109 inputInfo,
4110 outputInfo,
4111 batchToSpaceNdDesc);
4112 };
4113
4114    if (!IsDynamicTensor(outputInfo))
4115 {
4116 validateFunc(outputInfo, isSupported);
4117 }
4118 else
4119 {
4120 isSupported = AreDynamicTensorsSupported();
4121 }
4122
Finn Williams23b87b32019-07-30 11:44:05 +01004124 if (!isSupported)
4125 {
4126 return false;
4127 }
4128
4129 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
4130 assert(layer != nullptr);
4131 input.Connect(layer->GetInputSlot(0));
4132
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004133 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004134}
Mike Kelly0a879362019-07-29 16:56:31 +01004135
Finn Williamsd74c5052019-07-30 17:06:00 +01004136template<typename HalPolicy,
4137 typename HalOperation = typename HalPolicy::Operation,
4138 typename HalOperand = typename HalPolicy::Operand,
4139 typename HalModel = typename HalPolicy::Model>
4140bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4141{
4142 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4143 if (!input.IsValid())
4144 {
4145 return Fail("%s: Operation has invalid inputs", __func__);
4146 }
4147
4148 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4149 unsigned int rank = inputInfo.GetNumDimensions();
4150    if (rank != 4)
4151    {
4152        return Fail("%s: Only inputs with rank 4 are supported", __func__);
4153    }
4154
4155    unsigned int spatialDim = rank - 2;
4156
4157 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4158 if (!output)
4159 {
4160 return Fail("%s: Could not read output 0", __func__);
4161 }
4162
4163 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004164
4165 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4166 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4167
4168 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4169 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4170 {
4171 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4172 }
4173
4174 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004175 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4176 {
4177 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4178 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004179 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4180 {
4181 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4182 }
4183
4184 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4185 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4186 {
4187 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4188 }
4189
4190 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4191 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004192 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4193 {
4194 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4195 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004196 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4197 {
4198 int paddingBeforeInput = paddings[i];
4199 int paddingAfterInput = paddings[i + 1];
4200 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4201 {
4202 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4203 }
4204
4205        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput), static_cast<unsigned int>(paddingAfterInput));
4206 }
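    // The paddings operand is a flattened [spatialDim, 2] tensor, so paddings[2 * i] and
    // paddings[2 * i + 1] are the before/after padding for spatial dimension i.
    // Worked example (hypothetical shapes): an NHWC input of shape [1, 4, 4, 1] with
    // blockShape = [2, 2] and zero padding yields an output of shape [4, 2, 2, 1]; each padded
    // spatial dimension is divided by its block size and batch is multiplied by prod(blockShape).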
4207
4208 armnn::SpaceToBatchNdDescriptor descriptor;
4209 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4210 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4211 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4212
Kevin May42477c12020-03-26 13:34:14 +00004213 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004214 {
4215 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4216 }
4217
4218 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004219 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4220 {
4221 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4222 IsSpaceToBatchNdSupported,
4223 data.m_Backends,
4224 isSupported,
4225 inputInfo,
4226 outputInfo,
4227 descriptor);
4228 };
4229
4230    if (IsDynamicTensor(outputInfo))
4231 {
4232 isSupported = AreDynamicTensorsSupported();
4233 }
4234 else
4235 {
4236 validateFunc(outputInfo, isSupported);
4237 }
4238
Finn Williamsd74c5052019-07-30 17:06:00 +01004239 if (!isSupported)
4240 {
4241 return false;
4242 }
4243
4244 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4245 assert(layer != nullptr);
4246 input.Connect(layer->GetInputSlot(0));
4247
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004248 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004249}
4250
saoste01b8471482018-10-10 09:44:51 +01004251} // namespace armnn_driver