//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif


struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId>       m_Backends;
    armnn::INetworkPtr                        m_Network;
    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool                                      m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation vector applied to the tensor data, if it needs to be swizzled into
    //                   the layout ArmNN expects.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

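// Minimal usage sketch for the macro above (illustrative only; it mirrors the calls made
// later in this file, with inputInfo/outputInfo standing in for real TensorInfo objects):
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);
//     if (!isSupported)
//     {
//         return false;
//     }
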
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks whether an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks whether an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // The numbers of dimensions do not match, so we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}

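// Worked example for BroadcastTensor (illustrative values): broadcasting an input of shape
// { 4, 3 } against one of shape { 1, 2, 4, 3 } gives sizeDifference = 2, so the smaller
// tensor is reshaped to { 1, 1, 4, 3 } before both inputs are connected to startLayer.
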
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

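// Worked example (illustrative, assuming the standard NNAPI SAME padding rule): for
// input = 224, kernel = 3 and stride = 2, the output extent is ceil(224 / 2) = 112, the
// total padding required is (112 - 1) * 2 + 3 - 224 = 1, and that is split as
// outPadHead = 0, outPadTail = 1.
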
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}

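// Worked example (illustrative values): for per-axis quantized weights with scales
// { 0.1f, 0.2f } and an input scale of 0.5f, the bias scales computed above become
// { 0.05f, 0.1f }, i.e. input_scale * weight_scale[i] for each output channel.
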
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

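// Worked example for ValidateConcatOutputShape (illustrative values): concatenating inputs
// of shapes { 1, 2, 3 } and { 1, 5, 3 } along concatDim = 1 is only valid for an output
// shape of { 1, 7, 3 }; every dimension other than concatDim must match the first input.
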
bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // Check whether the requested transpose is supported before swizzling the inputs
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}

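// Worked example (illustrative): for a 4-D concatenation along dimension 2, the helper above
// rewrites concatDimension to 1 and returns the (SwapDim1And2, SwapDim1And2) pair, so the
// caller permutes the inputs before the concat and applies the inverse permutation afterwards.
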
} // anonymous namespace

namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

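// Typical usage sketch (illustrative only; the input index depends on the operation being
// converted — index 1 is shown here as a hypothetical weights operand):
//
//     const ConstTensorPin weightsPin =
//         ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
//     if (!weightsPin.IsValid())
//     {
//         return Fail("%s: operation has invalid weights", __func__);
//     }
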
template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        if (optional)
        {
            // A missing operand is acceptable for an optional input; outValue is left unchanged
            return true;
        }
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}

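// Typical usage sketch (illustrative only; the input index and operand meaning depend on
// the operation — index 3 is shown here as a hypothetical stride operand):
//
//     int32_t strideX = 0;
//     if (!GetInputScalar<HalPolicy>(operation, 3, HalPolicy::OperandType::INT32,
//                                    strideX, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
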
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}

1271#ifdef ARMNN_ANDROID_NN_V1_3
1272template<typename HalPolicy>
1273LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1274 uint32_t inputIndex,
1275 const::android::hardware::neuralnetworks::V1_3::Model& model,
1276 ConversionData& data)
1277{
1278 using HalOperand = typename HalPolicy::Operand;
1279 using HalOperandType = typename HalPolicy::OperandType;
1280 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1281
1282 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1283 if (!operand)
1284 {
1285 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1286 return LayerInputHandle();
1287 }
1288
1289 if (!IsOperandTypeSupportedForTensors(operand->type))
1290 {
1291 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1292 return LayerInputHandle();
1293 }
1294
1295 try
1296 {
1297 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Finn Williams9a044412020-08-17 19:08:35 +01001298
Kevin May42477c12020-03-26 13:34:14 +00001299 if (IsDynamicTensor(operandTensorInfo))
1300 {
Finn Williams291a16b2020-08-19 22:54:00 +01001301 data.m_DynamicInputsEncountered = true;
1302
Finn Williams9a044412020-08-17 19:08:35 +01001303 const uint32_t operandIndex = operation.inputs[inputIndex];
1304
1305 // Check if the dynamic input tensors have been inferred by one of the previous layers
1306 // If not we can't support them
Finn Williams291a16b2020-08-19 22:54:00 +01001307 if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
Finn Williams9a044412020-08-17 19:08:35 +01001308 {
1309 operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1310 }
1311 else
1312 {
1313 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1314 return LayerInputHandle();
1315 }
Kevin May42477c12020-03-26 13:34:14 +00001316 }
1317
1318 switch (operand->lifetime)
1319 {
1320 case HalOperandLifeTime::SUBGRAPH_INPUT:
1321 {
1322 // NOTE: We must check whether we can support the input tensor on at least one
1323 // of the provided backends; otherwise we cannot convert the operation
1324 bool isInputSupported = false;
1325 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1326 IsInputSupported,
1327 data.m_Backends,
1328 isInputSupported,
1329 operandTensorInfo);
1330
1331 if (!isInputSupported)
1332 {
1333 Fail("%s: unsupported input tensor", __func__);
1334 return LayerInputHandle();
1335 }
1336
James Ward4e22f602020-10-20 15:50:33 +01001337 [[clang::fallthrough]]; // intentional fallthrough
Kevin May42477c12020-03-26 13:34:14 +00001338 }
1339 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1340 case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1341 {
1342 // The tensor is either an operand internal to the model, or a model input.
1343 // It can be associated with an ArmNN output slot for an existing layer.
1344
1345 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1346 const uint32_t operandIndex = operation.inputs[inputIndex];
1347 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1348 }
1349 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1350 case HalOperandLifeTime::CONSTANT_REFERENCE:
1351 {
1352 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1353 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1354 if (tensorPin.IsValid())
1355 {
1356 bool isSupported = false;
1357 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1358 IsConstantSupported,
1359 data.m_Backends,
1360 isSupported,
1361 tensorPin.GetConstTensor().GetInfo());
1362 if (!isSupported)
1363 {
1364 return LayerInputHandle();
1365 }
1366
1367 armnn::IConnectableLayer* constantLayer =
1368 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1369 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1370 outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
1371
1372 return LayerInputHandle(true, &outputSlot, operandTensorInfo);
1373 }
1374 else
1375 {
1376 Fail("%s: invalid operand tensor", __func__);
1377 return LayerInputHandle();
1378 }
1379 break;
1380 }
1381 default:
1382 {
1383 // Unsupported lifetime for an input tensor
1384 Fail("%s: unsupported lifetime for input tensor: %s",
1385 __func__, toString(operand->lifetime).c_str());
1386 return LayerInputHandle();
1387 }
1388 }
1389 }
1390 catch (UnsupportedOperand<HalOperandType>& e)
1391 {
1392 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1393 return LayerInputHandle();
1394 }
1395}
1396#endif
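
// Note on the terminology above: a "Type 1" dynamic tensor has a known rank with some
// unspecified dimensions and can be resolved by shape inference from the preceding layer,
// whereas a "Type 2" dynamic tensor (unknown rank) is rejected outright. For illustration,
// a rank-3 Type 1 shape can be expressed as
//
//     armnn::TensorShape({1, 0, 0}, {true, false, false}); // dim 0 specified, dims 1-2 inferred
//
// which mirrors the placeholder shapes used by ConvertConcatenation further down.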

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type 1 dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }

        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();

        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
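
// A minimal sketch of the intended call pattern (converter-local names such as 'desc',
// 'layer' and 'inputInfo' are placeholders): converters pass a validateFunc so that outputs
// whose shape is only known after inference can still be checked against the backends, and
// optionally a fused activation to be appended via ProcessActivation.
//
//     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
//     {
//         FORWARD_LAYER_SUPPORT_FUNC(__func__, IsActivationSupported, data.m_Backends,
//                                    isSupported, inputInfo, outputInfo, desc);
//     };
//     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data,
//                                                    nullptr, validateFunc);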

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}
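
// Usage sketch: several NNAPI operations take a trailing optional BOOL operand that selects
// NCHW when true (NHWC remains the default in every other case, including when the operand
// is absent). For example, pooling reads it from input 10 or 7 depending on the operand
// count, as in ConvertPooling2d below:
//
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);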

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc,
                                                   activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}
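
// For BoundedReLu, m_A is the upper bound and m_B the lower bound, so RELU1 computes
// min(1, max(-1, x)) and RELU6 (with m_B left at its 0.0f default) computes min(6, max(0, x)).
// Worked example: x = {-3.0f, 0.5f, 7.0f} yields {-1.0f, 0.5f, 1.0f} under RELU1 and
// {0.0f, 0.5f, 6.0f} under RELU6.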

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // Add padding for each dimension of the input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}
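
// The paddings operand is a [rank, 2] tensor of (before, after) pairs, one pair per input
// dimension. A hypothetical example for a rank-2 input of shape [2, 3] with paddings
// {{1, 1}, {0, 2}}: m_PadList becomes {(1, 1), (0, 2)} and the padded output shape is [4, 5].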

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding scheme, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
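
// When the implicit padding scheme is used, CalcPadding derives the explicit values. A worked
// example under SAME padding (a sketch of the usual NNAPI arithmetic, for illustration):
// inputWidth = 10, m_PoolWidth = 3, m_StrideX = 2 gives outputWidth = ceil(10 / 2) = 5 and
// total padding max(0, (5 - 1) * 2 + 3 - 10) = 1, split as m_PadLeft = 0, m_PadRight = 1.
// Under VALID padding all four pad values are 0.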

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2, and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
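
// BroadcastTensor pads the lower-rank input with leading 1-dimensions (via a Reshape layer)
// before connecting both inputs. As a hypothetical sketch, adding a [4] tensor to a
// [2, 3, 4] tensor first reshapes the [4] input to [1, 1, 4], after which ArmNN broadcasts
// it across the remaining dimensions.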

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertArgMinMax(const HalOperation& operation,
                      const HalModel& model,
                      ConversionData& data,
                      armnn::ArgMinMaxFunction argMinMaxFunction)
{
    ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. a rank 4 tensor can have axis in range [-4, 4)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis     = axis;

    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);

    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
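
// Worked example of the axis convention: for a rank-4 input of shape [2, 3, 4, 5], valid
// axes lie in [-4, 4), and a negative axis resolves as axis + rank, so axis = -1 selects
// dimension 3 and ArgMax over it produces an output of shape [2, 3, 4].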

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape     = outputInfo.GetShape();
    const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // Handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if a permutation is required and get the pair of permutations needed for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors, as dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
                                   outputInfo, concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const unsigned int numInputSlots = layer->GetNumInputSlots();
    ARMNN_ASSERT(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[i].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        if (!transposeOutputShape())
        {
            return false;
        }
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            if (!transposeOutputShape())
            {
                return false;
            }
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                       0,
                                                       *layer,
                                                       model,
                                                       data,
                                                       nullptr,
                                                       validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
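
// Worked example of the permutation handling (hypothetical shapes): concatenating two 4-D
// tensors [1, 2, 3, 4] and [1, 2, 5, 4] along dimension 2 yields [1, 2, 8, 4]. Because only
// dimensions 0, 1 and 3 are supported for a 4-D concat, CreateConcatPermutationParameters
// returns a swizzle (permutationPair.first) that moves dimension 2 onto a supported axis for
// the inputs, and the inverse mapping (permutationPair.second) restores the original layout
// through the deswizzle Transpose layer afterwards.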

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
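
// The CONV_2D weights operand is laid out as [ depth_out, filter_height, filter_width,
// depth_in ], which is why the implicit-padding branch above reads kernelY from shape[1]
// and kernelX from shape[2]. As a hypothetical example, a 3x3 convolution producing 16
// channels from an 8-channel NHWC input carries weights of shape [16, 3, 3, 8].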

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::DepthToSpaceDescriptor descriptor;

    if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
    {
        return Fail("%s: Could not read block size", __func__);
    }
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
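
// Shape sketch for DEPTH_TO_SPACE in NHWC (standard op semantics, for illustration): with
// m_BlockSize = 2, an input of shape [1, 2, 2, 8] is rearranged to [1, 4, 4, 2], i.e. H and
// W are multiplied by the block size while the channel count is divided by its square.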

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002576 const ConstTensorPin weightsPin =
2577 ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
2578 1,
2579 model,
2580 data,
2581 HWIMToMIHW,
2582 &weightsShape);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002583
2584 // Bias is a 1D tensor
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002585 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002586
2587 if (!weightsPin.IsValid() || !biasPin.IsValid())
2588 {
2589 return Fail("%s: Operation has invalid inputs", __func__);
2590 }
2591
2592 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2593 armnn::ConstTensor bias = biasPin.GetConstTensor();
2594 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2595
2596 ActivationFn activation;
2597
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002598 if (operation.inputs.size() == 11)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002599 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002600 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2601 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2602 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2603 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2604 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2605 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002606 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002607 {
2608 return Fail("%s: Operation has invalid inputs", __func__);
2609 }
2610 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002611 else if (operation.inputs.size() == 8)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002612 {
2613 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002614 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2615 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2616 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002617 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002618 {
2619 return Fail("%s: Operation has invalid inputs", __func__);
2620 }
2621
2622 const uint32_t kernelX = weights.GetShape()[3];
2623 const uint32_t kernelY = weights.GetShape()[2];
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002624 const uint32_t inputX = inputInfo.GetShape()[2];
2625 const uint32_t inputY = inputInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002626
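        // For the implicit SAME scheme the NN API pads so that outputSize = ceil(inputSize / stride);
        // e.g. with inputX = 10, kernelX = 3, strideX = 2 the output is 5, the total pad is
        // (5 - 1) * 2 + 3 - 10 = 1, and the split puts the extra on the right: padLeft = 0, padRight = 1.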
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();

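    // BroadcastTensor reshapes the lower-rank input so both inputs have matching rank before
    // the element-wise layer; e.g. dividing a [1, 2, 2, 4] tensor by a [4] tensor reshapes the
    // second input to [1, 1, 1, 4] (illustrative shapes).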
    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFloorSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const HalOperation& operation,
                                      const HalModel& model,
                                      const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant<HalPolicy>(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non-const tensor; this indicates they might be the output of a DEQUANTIZE op.
    // Iterate over the nodes and find the previous operation, which should be DEQUANTIZE.
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op whose output operand index equals weightsInputIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
        ARMNN_ASSERT(operand);

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }
        const int8_t* quantizedBuffer = reinterpret_cast<const int8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        ARMNN_ASSERT(dstPtr);
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            dstPtr[i] = quantizedBuffer[i] * quantizationScale;
        }
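        // QSYMM8 is symmetric (zero point is always 0), so dequantization is simply
        // value * scale; e.g. a stored value of -64 with scale 0.5f yields -32.0f.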

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // Return an invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    const armnn::TensorInfo& weightsInfo = GetTensorInfoForOperand(*weightsOperand);
    bool constantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);

    armnn::Optional<armnn::ConstTensor> optionalWeights = armnn::EmptyOptional();
    if (!constantWeights)
    {
        weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
        if (!weightsInput.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else
    {
        ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
        if (!weightsPin.IsValid())
        {
            return Fail("%s: Operation has invalid weights", __func__);
        }
        optionalWeights = armnn::Optional<armnn::ConstTensor>(weightsPin.GetConstTensor());
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }
    armnn::TensorInfo biasInfo = GetTensorInfoForOperand(*biasOperand);
    bool constantBias = IsOperandConstant<HalPolicy>(*biasOperand);

    armnn::Optional<armnn::ConstTensor> optionalBias = armnn::EmptyOptional();
    if (!constantBias)
    {
        biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data);
        if (!biasInput.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else
    {
        ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
        if (!biasPin.IsValid())
        {
            return Fail("%s: Operation has invalid bias", __func__);
        }
        optionalBias = armnn::Optional<armnn::ConstTensor>(biasPin.GetConstTensor());
    }

    if (constantWeights != constantBias)
    {
        return Fail("%s: Weights and bias must either both be constant or both be non-constant", __func__);
    }

    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }
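    // FlattenFullyConnectedInput collapses inputs of rank > 2 to the 2D shape the FC layer expects:
    // [ totalElements / inputSize, inputSize ], where inputSize is weightsInfo.GetShape()[1].
    // For example (illustrative shapes), an input of [ 1, 2, 2, 8 ] with weights [ 16, 8 ] flattens
    // to [ 4, 8 ].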

    // Ensure the bias quantization scale is within 1% of inputScale * weightsScale
    // (small float differences can exist)
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = constantWeights;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc,
                                               optionalWeights,
                                               optionalBias);

    if (inputInfo.GetNumDimensions() > 2U)
    {
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        assert(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect non-constant weights and bias as additional inputs
    if (!desc.m_ConstantWeights)
    {
        weightsInput.Connect(startLayer->GetInputSlot(1));
        biasInput.Connect(startLayer->GetInputSlot(2));
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
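    // L2 normalization scales each value by the L2 norm taken across the channel dimension:
    // out[n,h,w,c] = in[n,h,w,c] / sqrt(sum over c of in[n,h,w,c]^2)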

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsL2NormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
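    // e.g. an AndroidNN radius of 2 becomes an ArmNN window size of 1 + (2 * 2) = 5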

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
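    // Negative axes wrap around; e.g. with rank 4, axis -1 maps to (-1 + 4) % 4 = 3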

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMultiplicationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensors,
    // the scale and zeroPoint must be the same as input0.
    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q
    // the pad value must be "logical zero", so we set it equal to the QuantizationOffset; it then
    // effectively ends up as (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }
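    // e.g. a QAsymmU8 input with zeroPoint 128 is padded with the raw value 128,
    // which dequantizes to (128 - 128) * scale = 0.0f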

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPadSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand          = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand         = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }
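    // e.g. reshaping a [2, 8] input with targetDimensions { -1, 4 } resolves the -1 to
    // 16 / 4 = 4, giving a requested shape of [4, 4]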

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSubtractionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence,
                    dimensionSequence + rank);
    }
    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }
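    // e.g. squeezing a [1, 3, 1, 2] input with no axis operand drops both size-1
    // dimensions, giving an output shape of [3, 2]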

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               reshapeDesc);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
    const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The lengths of the beginOperand, endOperand and stridesOperand must match the rank of the input
    auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be a non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
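    // Mask semantics (as in TensorFlow-style strided slice): bit i of begin_mask / end_mask means
    // begin[i] / end[i] is ignored and the fullest possible range of that axis is used instead;
    // bit i of shrink_axis_mask removes axis i from the output shape.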
3909
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check if the slice can fit in an inferred output
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis being shrunk
            // is greater than 1, fail: the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

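// Converts the Android NN TRANSPOSE operation to an ArmNN Transpose layer. The permutation operand
// (input 1) is optional; when present, output dimension i corresponds to input dimension perm[i]
// (e.g. perm = {0, 3, 1, 2} maps an NHWC input to an NCHW output).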
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
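        // No permutation was supplied: per the NNAPI spec the default is to reverse the dimension
        // order, i.e. perm = {rank - 1, ..., 0}.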
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

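// Converts the Android NN BATCH_TO_SPACE_ND operation to an ArmNN BatchToSpaceNd layer. Input 1 holds
// the per-spatial-dimension block sizes; crops are not part of the NN API operation, so they are zeroed.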
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

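    // From HAL 1.2 onwards the operation may carry an optional data layout operand (NHWC or NCHW);
    // older models default to NHWC.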
    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    // Crops are set to zero as they are not exposed through the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

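// Converts the Android NN SPACE_TO_BATCH_ND operation to an ArmNN SpaceToBatchNd layer. Input 1 is the
// block shape and input 2 the paddings; both are read as constant int32 tensors of the expected shape.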
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    unsigned int spatialDim = rank - 2;

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }
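    // The paddings tensor has shape [spatialDim, 2]: each consecutive pair of values holds the padding
    // before and after one spatial dimension.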
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver