//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include "1.0/FullyConnected.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

#ifdef ARMNN_ANDROID_R
using OperandType = android::nn::hal::OperandType;
#endif

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    bool m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
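
// Usage sketch: this mirrors how the macro is invoked throughout this header
// (see e.g. the IsReshapeSupported check in BroadcastTensor below); the tensor
// infos and descriptor are assumed to be in scope.
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsReshapeSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                reshapeDescriptor);
//     if (!isSupported)
//     {
//         // Conversion must fail for this operation
//     }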

template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks whether an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks whether an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (!reshapeLayer)
    {
        throw armnn::RuntimeException("ReshapeLayer is null");
    }

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (!startLayer)
    {
        throw armnn::RuntimeException("StartLayer is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (!data.m_Network)
    {
        throw armnn::RuntimeException("Network is null");
    }

    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
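
// Worked example (sketch): broadcasting input1 of shape { 3, 4 } against input0
// of shape { 1, 2, 3, 4 }. Here sizeDifference is 2, so the smaller shape is
// right-aligned and padded with degenerate dimensions:
//
//     { 3, 4 }  ->  reshaped to  { 1, 1, 3, 4 }
//
// The reshape output, not the original input1, is then connected to startLayer.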

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
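
// Worked example (sketch, assuming the usual NNAPI SAME-padding rule): for
// input = 224, kernel = 3, stride = 2, the output size is ceil(224 / 2) = 112,
// which needs (112 - 1) * 2 + 3 = 225 input elements, i.e. 1 element of padding
// in total, split as outPadHead = 0 and outPadTail = 1.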

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // bias is expected to be a 1d tensor, set qdim=0
        biasInfo.SetQuantizationDim(0);

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
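
// Worked example (sketch) for the non-per-axis path: with an input scale of
// 0.5f and a weight scale of 0.25f, the bias scale must be 0.5f * 0.25f = 0.125f.
// A bias scale such as 0.1251f falls within the 1% tolerance and is silently
// corrected to 0.125f; anything further out is left untouched for ArmNN to reject.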

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
    if (!layer)
    {
        throw armnn::RuntimeException("TransposeLayer is null");
    }
    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                        "%s: Invalid output shape for dimension %d (%d != %d)",
                        __func__,
                        i,
                        outputShape[i],
                        outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
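
// Worked example: concatenating inputs of shape { 1, 2, 3 } and { 1, 5, 3 }
// along concatDim = 1 must give an output shape of { 1, 7, 3 }; all other
// dimensions have to match the first input exactly.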

bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;

    if (numberOfDimensions < 3)
    {
        return Fail("%s: Invalid numberOfDimensions: %i < 3", __func__, numberOfDimensions);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: Invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.inputs[inputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, inputIndex, getMainModel(model).operands.size());
        return nullptr;
    }

    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    if (operation.outputs[outputIndex] >= getMainModel(model).operands.size())
    {
        Fail("%s: invalid model index: %i >= %i", __func__, outputIndex, getMainModel(model).operands.size());
        return nullptr;
    }
    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data,
                    bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        if (optional)
        {
            // A missing optional scalar is not an error; outValue is left untouched
            return true;
        }
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(HalOperandType::INT32).c_str(),
                    toString(HalOperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalBool(const HalOperation& operation,
                     uint32_t inputIndex,
                     const HalModel& model,
                     const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__, operand.type);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1295
Kevin May42477c12020-03-26 13:34:14 +00001296
1297#ifdef ARMNN_ANDROID_NN_V1_3
1298template<typename HalPolicy>
1299LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1300 uint32_t inputIndex,
1301 const::android::hardware::neuralnetworks::V1_3::Model& model,
1302 ConversionData& data)
1303{
1304 using HalOperand = typename HalPolicy::Operand;
1305 using HalOperandType = typename HalPolicy::OperandType;
1306 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1307
1308 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1309 if (!operand)
1310 {
1311 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1312 return LayerInputHandle();
1313 }
1314
1315 if (!IsOperandTypeSupportedForTensors(operand->type))
1316 {
1317 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1318 return LayerInputHandle();
1319 }
1320
1321 try
1322 {
1323 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Finn Williams9a044412020-08-17 19:08:35 +01001324
Kevin May42477c12020-03-26 13:34:14 +00001325 if (IsDynamicTensor(operandTensorInfo))
1326 {
Finn Williams291a16b2020-08-19 22:54:00 +01001327 data.m_DynamicInputsEncountered = true;
1328
Finn Williams9a044412020-08-17 19:08:35 +01001329 const uint32_t operandIndex = operation.inputs[inputIndex];
1330
1331 // Check if the dynamic input tensors have been inferred by one of the previous layers
1332 // If not we can't support them
Finn Williams291a16b2020-08-19 22:54:00 +01001333 if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
Finn Williams9a044412020-08-17 19:08:35 +01001334 {
1335 operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1336 }
1337 else
1338 {
1339 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1340 return LayerInputHandle();
1341 }
Kevin May42477c12020-03-26 13:34:14 +00001342 }
1343
1344 switch (operand->lifetime)
1345 {
1346 case HalOperandLifeTime::SUBGRAPH_INPUT:
1347 {
1348 // NOTE: We must check whether we can support the input tensor on at least one
1349 // of the provided backends; otherwise we cannot convert the operation
1350 bool isInputSupported = false;
1351 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1352 IsInputSupported,
1353 data.m_Backends,
1354 isInputSupported,
1355 operandTensorInfo);
1356
1357 if (!isInputSupported)
1358 {
1359 Fail("%s: unsupported input tensor", __func__);
1360 return LayerInputHandle();
1361 }
1362
James Ward4e22f602020-10-20 15:50:33 +01001363 [[clang::fallthrough]]; // intentional fallthrough
Kevin May42477c12020-03-26 13:34:14 +00001364 }
1365 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1366 case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1367 {
 1368 // The tensor is either an operand internal to the model, or a model input/output.
 1369 // Either way it can be associated with the ArmNN output slot of an existing layer.
1370
1371 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1372 const uint32_t operandIndex = operation.inputs[inputIndex];
1373 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1374 }
1375 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1376 case HalOperandLifeTime::CONSTANT_REFERENCE:
1377 {
1378 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1379 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1380 if (tensorPin.IsValid())
1381 {
1382 bool isSupported = false;
1383 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1384 IsConstantSupported,
1385 data.m_Backends,
1386 isSupported,
1387 tensorPin.GetConstTensor().GetInfo());
1388 if (!isSupported)
1389 {
1390 return LayerInputHandle();
1391 }
1392
1393 armnn::IConnectableLayer* constantLayer =
1394 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1395 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
Matthew Sloyan56c249c2021-08-09 12:49:23 +01001396 armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
1397 outputSlot.SetTensorInfo(constantTensorInfo);
Kevin May42477c12020-03-26 13:34:14 +00001398
Matthew Sloyan56c249c2021-08-09 12:49:23 +01001399 return LayerInputHandle(true, &outputSlot, constantTensorInfo);
Kevin May42477c12020-03-26 13:34:14 +00001400 }
1401 else
1402 {
1403 Fail("%s: invalid operand tensor", __func__);
1404 return LayerInputHandle();
1405 }
1407 }
1408 default:
1409 {
1410 // Unsupported lifetime for an input tensor
1411 Fail("%s: unsupported lifetime for input tensor: %s",
1412 __func__, toString(operand->lifetime).c_str());
1413 return LayerInputHandle();
1414 }
1415 }
1416 }
1417 catch (UnsupportedOperand<HalOperandType>& e)
1418 {
1419 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1420 return LayerInputHandle();
1421 }
1422}
1423#endif
1424
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001425template<typename HalPolicy,
1426 typename HalOperation = typename HalPolicy::Operation,
1427 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001428bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1429 uint32_t operationOutputIndex,
1430 armnn::IConnectableLayer& layer,
1431 uint32_t layerOutputIndex,
1432 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +01001433 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001434 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Sadik Armagandbda4b72020-09-03 11:33:07 +01001435 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001436 const ActivationFn& activationFunction = ActivationFn::kActivationNone,
Sadik Armagandbda4b72020-09-03 11:33:07 +01001437 bool inferOutputShapes = false)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001438{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001439 using HalOperand = typename HalPolicy::Operand;
1440
1441 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001442 if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
1443 {
1444 return false;
1445 }
1446
1447 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001448 if (overrideOutputInfo == nullptr)
1449 {
1450 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1451 }
1452 else
1453 {
1454 outputSlot.SetTensorInfo(*overrideOutputInfo);
1455 }
1456
Finn Williamsa4983ce2020-07-23 12:55:12 +01001457 bool isSupported = false;
Sadik Armagandbda4b72020-09-03 11:33:07 +01001458 if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
Sadik Armagan813f2302020-05-19 14:10:30 +01001459 {
Sadik Armagandbda4b72020-09-03 11:33:07 +01001460 // Type 1 dynamic tensors require the previous layer's output shape for inference
1461 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1462 {
Mike Kellye2d611e2021-10-14 12:35:58 +01001463 if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
Sadik Armagandbda4b72020-09-03 11:33:07 +01001464 {
1465 return false;
1466 }
1467 }
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001468 // IsTensorInfoSet will infer the dynamic output shape
Finn Williamsa4983ce2020-07-23 12:55:12 +01001469 outputSlot.IsTensorInfoSet();
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001470 // Once the shape is inferred we can validate it
Finn Williamsa4983ce2020-07-23 12:55:12 +01001471 validateFunc(outputSlot.GetTensorInfo(), isSupported);
1472
Sadik Armagandbda4b72020-09-03 11:33:07 +01001473 if (!isSupported)
1474 {
1475 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1476 {
1477 layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1478 }
1479 return false;
1480 }
Sadik Armagan813f2302020-05-19 14:10:30 +01001481 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01001482
Finn Williamsa4983ce2020-07-23 12:55:12 +01001483 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
Kevin Mayfcf2a152020-09-08 16:06:32 +01001484
1485 if (activationFunction != ActivationFn::kActivationNone)
1486 {
1487 const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
1488 armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
1489 &layer, data);
1490
1491 if (!endLayer)
1492 {
1493 return Fail("%s: ProcessActivation failed", __func__);
1494 }
1495
1496 armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
1497 data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
1498 }
1499 else
1500 {
1501 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1502 }
Finn Williamsa4983ce2020-07-23 12:55:12 +01001503
Mike Kellyb5fdf382019-06-11 16:35:25 +01001504 return true;
1505}
1506
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001507template<typename HalPolicy,
1508 typename HalOperation = typename HalPolicy::Operation,
1509 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001510armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1511 uint32_t inputIndex,
1512 const HalModel& model,
1513 ConversionData& data)
1514{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001515 using HalOperand = typename HalPolicy::Operand;
1516
1517 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001518 if (!operand)
1519 {
1520 return armnn::DataLayout::NHWC;
1521 }
1522
1523 if (!IsBool(*operand))
1524 {
1525 return armnn::DataLayout::NHWC;
1526 }
1527
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001528 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001529 if (!valueAddress)
1530 {
1531 return armnn::DataLayout::NHWC;
1532 }
1533
1534 if (*(static_cast<const bool*>(valueAddress)))
1535 {
1536 return armnn::DataLayout::NCHW;
1537 }
1538 else
1539 {
1540 return armnn::DataLayout::NHWC;
1541 }
1542}
1543
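// Sketch of the operand contract assumed by OptionalDataLayout(): the optional
// input is a BOOL scalar where true selects NCHW, while a missing, non-bool or
// false operand falls back to NHWC. Typical use with a pooling descriptor:
//
//     desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);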
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001544template<typename HalPolicy,
1545 typename HalOperation = typename HalPolicy::Operation,
1546 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001547bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1548 uint32_t outputIndex,
1549 armnn::IConnectableLayer& layer,
1550 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001551 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001552 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001553 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1554 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001555{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001556 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1557 outputIndex,
1558 layer,
1559 outputIndex,
1560 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001561 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001562 overrideOutputInfo,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001563 validateFunc,
1564 activationFunction);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001565}
1566
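// The overload above forwards to the earlier SetupAndTrackLayerOutputSlot,
// reusing the operation output index as the layer output index. A typical call
// from a converter, with validateFunc defined in that converter, looks like:
//
//     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);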
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001567template<typename HalPolicy,
1568 typename HalOperation = typename HalPolicy::Operation,
1569 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001570bool ConvertToActivation(const HalOperation& operation,
1571 const char* operationName,
1572 const armnn::ActivationDescriptor& activationDesc,
1573 const HalModel& model,
1574 ConversionData& data)
1575{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001576 using HalOperand = typename HalPolicy::Operand;
1577
1578 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001579 if (!input.IsValid())
1580 {
1581 return Fail("%s: Input 0 is invalid", operationName);
1582 }
1583
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001584 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001585 if (!outputOperand)
1586 {
1587 return false;
1588 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001589
1590 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001591
1592 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001593
1594 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1595 {
1596 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1597 IsActivationSupported,
1598 data.m_Backends,
1599 isSupported,
1600 input.GetTensorInfo(),
1601 outInfo,
1602 activationDesc);
1603 };
1604
 1605 if (IsDynamicTensor(outInfo))
1606 {
1607 isSupported = AreDynamicTensorsSupported();
1608 }
1609 else
1610 {
1611 validateFunc(outInfo, isSupported);
1612 }
1613
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001614 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001615 {
1616 return false;
1617 }
1618
1619 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01001620 if (!layer)
1621 {
1622 return Fail("%s: Could not add the ActivationLayer", __func__);
1623 }
arovir01b0717b52018-09-05 17:03:25 +01001624 input.Connect(layer->GetInputSlot(0));
1625
Finn Williamsa4983ce2020-07-23 12:55:12 +01001626 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001627}
1628
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001629template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001630 typename HalOperation = typename HalPolicy::Operation,
1631 typename HalModel = typename HalPolicy::Model>
1632bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1633{
1634 armnn::ActivationDescriptor desc;
1635 desc.m_Function = armnn::ActivationFunction::ReLu;
1636
1637 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1638}
1639
1640template<typename HalPolicy,
1641 typename HalOperation = typename HalPolicy::Operation,
1642 typename HalModel = typename HalPolicy::Model>
1643bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1644{
1645 armnn::ActivationDescriptor desc;
1646 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1647 desc.m_A = 1.0f;
1648 desc.m_B = -1.0f;
1649
1650 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1651}
1652
1653template<typename HalPolicy,
1654 typename HalOperation = typename HalPolicy::Operation,
1655 typename HalModel = typename HalPolicy::Model>
1656bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1657{
1658 armnn::ActivationDescriptor desc;
1659 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1660 desc.m_A = 6.0f;
1661
1662 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1663}
1664
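// For reference: ArmNN's BoundedReLu computes output = min(m_A, max(m_B, input)),
// so m_A/m_B of 1.0f/-1.0f above implements RELU1, and m_A of 6.0f with the
// default m_B of 0.0f implements RELU6.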
1665template<typename HalPolicy,
1666 typename HalOperation = typename HalPolicy::Operation,
1667 typename HalModel = typename HalPolicy::Model>
1668bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1669{
1670 armnn::ActivationDescriptor desc;
1671 desc.m_Function = armnn::ActivationFunction::TanH;
 1672 desc.m_A = 1.0f; // Android NN does not expose TanH scaling parameters,
 1673 desc.m_B = 1.0f; // so both are set to 1.0f for unity scaling
1674
1675 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1676}
1677
1678template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001679 typename HalOperation = typename HalPolicy::Operation,
1680 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001681bool ConvertPaddings(const HalOperation& operation,
1682 const HalModel& model,
1683 ConversionData& data,
1684 unsigned int rank,
1685 armnn::PadDescriptor& padDescriptor)
1686{
1687 using HalOperand = typename HalPolicy::Operand;
1688
1689 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1690 if (!paddingsOperand)
1691 {
1692 return Fail("%s: Could not read paddings operand", __func__);
1693 }
1694
1695 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1696 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1697 {
1698 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1699 }
1700
1701 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001702 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1703 {
1704 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1705 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001706
1707 // add padding for each dimension of input tensor.
1708 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1709 {
1710 int paddingBeforeInput = paddings[i];
1711 int paddingAfterInput = paddings[i + 1];
1712
1713 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1714 {
 1715 return Fail("%s: Operation has invalid paddings operand: negative padding values are not allowed.", __func__);
1716 }
1717
1718 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1719 }
1720
1721 return true;
1722}
1723
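// Worked example of the paddings contract assumed by ConvertPaddings() (values
// are hypothetical): for a rank 2 input, a [2, 2] paddings operand holding
// rows {1, 2} and {0, 3} is read as the flat vector {1, 2, 0, 3} and yields
// m_PadList = { {1, 2}, {0, 3} }, i.e. one (before, after) pair per dimension.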
1724template<typename HalPolicy,
1725 typename HalOperation = typename HalPolicy::Operation,
1726 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001727bool ConvertPooling2d(const HalOperation& operation,
1728 const char* operationName,
1729 armnn::PoolingAlgorithm poolType,
1730 const HalModel& model,
1731 ConversionData& data)
1732{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001733 using HalOperand = typename HalPolicy::Operand;
1734 using HalOperandType = typename HalPolicy::OperandType;
1735
1736 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001737 if (!input.IsValid())
1738 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001739 return Fail("%s: Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001740 }
1741
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001742 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001743 if (!output)
1744 {
1745 return Fail("%s: Could not read output 0", __func__);
1746 }
1747
1748 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1749 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1750
arovir01b0717b52018-09-05 17:03:25 +01001751 armnn::Pooling2dDescriptor desc;
1752 desc.m_PoolType = poolType;
1753 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001754 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001755
1756 ActivationFn activation;
1757
Sadik Armagan15d63e22019-07-26 16:59:35 +01001758 auto inputSize = operation.inputs.size();
1759
1760 if (inputSize >= 10)
1761 {
1762 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1763 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1764 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1765 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1766 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1767 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1768 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1769 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1770 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1771 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1772 {
1773 return Fail("%s: Operation has invalid inputs", operationName);
1774 }
1775
Kevin May42477c12020-03-26 13:34:14 +00001776 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001777 {
1778 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1779 }
1780 }
1781 else
arovir01b0717b52018-09-05 17:03:25 +01001782 {
1783 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1784 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001785 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1786 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1787 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1788 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1789 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1790 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001791 {
1792 return Fail("%s: Operation has invalid inputs", operationName);
1793 }
1794
Kevin May42477c12020-03-26 13:34:14 +00001795 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001796 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001797 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001798 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001799
1800 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1801 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1802 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1803
1804 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1805 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001806 }
1807
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001808 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001809
1810 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1811 {
1812 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1813 IsPooling2dSupported,
1814 data.m_Backends,
1815 isSupported,
1816 inputInfo,
1817 outputInfo,
1818 desc);
1819
1820 };
1821
 1822 if (IsDynamicTensor(outputInfo))
1823 {
1824 isSupported = AreDynamicTensorsSupported();
1825 }
1826 else
1827 {
1828 validateFunc(outputInfo, isSupported);
1829 }
1830
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001831 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001832 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001833 return false;
arovir01b0717b52018-09-05 17:03:25 +01001834 }
arovir01b0717b52018-09-05 17:03:25 +01001835
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001836 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1837 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001838 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001839 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001840 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001841
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001842 input.Connect(pooling2dLayer->GetInputSlot(0));
1843
Kevin Mayfcf2a152020-09-08 16:06:32 +01001849 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1850 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001851}
1852
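// For reference, the two pooling signatures handled above:
//   - explicit padding: input, pad left/right/top/bottom, stride x/y,
//     filter width/height, activation [, data layout] (10 or 11 inputs)
//   - implicit padding: input, padding scheme, stride x/y, filter width/height,
//     activation [, data layout] (7 or 8 inputs)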
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001853template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001854 typename HalOperation = typename HalPolicy::Operation,
1855 typename HalModel = typename HalPolicy::Model>
1856bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001857{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001858 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001859
1860 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1861 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1862
1863 if (!input0.IsValid() || !input1.IsValid())
1864 {
1865 return Fail("%s: Operation has invalid inputs", __func__);
1866 }
1867
 1868 // The fused activation function parameter is always at input index 2,
 1869 // and it is optional
1870 ActivationFn activationFunction;
1871 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1872 {
1873 return Fail("%s: Operation has invalid inputs", __func__);
1874 }
1875
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001876 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001877 if (!outputOperand)
1878 {
1879 return false;
1880 }
1881
1882 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1883 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1884
1885 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001886
1887 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001888 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1889 {
1890 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1891 IsAdditionSupported,
1892 data.m_Backends,
1893 isSupported,
1894 inputInfo0,
1895 inputInfo1,
1896 outputInfo);
1897 };
1898
 1899 if (!IsDynamicTensor(outputInfo))
1900 {
1901 validateFunc(outputInfo, isSupported);
1902 }
1903 else
1904 {
1905 isSupported = AreDynamicTensorsSupported();
1906 }
1907
Mike Kelly46272802019-08-14 17:00:48 +01001908 if (!isSupported)
1909 {
1910 return false;
1911 }
1912
1913 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01001914
Kevin Mayfcf2a152020-09-08 16:06:32 +01001915 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1916 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01001917 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01001918 return false;
1919 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01001920
Kevin Mayfcf2a152020-09-08 16:06:32 +01001921 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
1922 data, nullptr, validateFunc, activationFunction);
1923
Mike Kelly46272802019-08-14 17:00:48 +01001924}
1925
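// Note on the broadcast step above: BroadcastTensor() is expected to reshape
// the lower-rank input so that, for example, adding a [C] tensor to an
// [N, H, W, C] tensor satisfies ArmNN's elementwise broadcasting rules.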
1926template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001927 typename HalOperation = typename HalPolicy::Operation,
1928 typename HalModel = typename HalPolicy::Model>
1929bool ConvertArgMinMax(const HalOperation& operation,
1930 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001931 ConversionData& data,
1932 armnn::ArgMinMaxFunction argMinMaxFunction)
1933{
1934 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1935
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001936 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001937 using HalOperandType = typename HalPolicy::OperandType;
1938
1939 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1940
1941 if (!input0.IsValid())
1942 {
1943 return Fail("%s: Operation has invalid inputs", __func__);
1944 }
1945
1946 int32_t axis;
1947 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1948 {
1949 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1950 }
1951
1952 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1953 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1954
1955 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1956 {
 1957 // Square bracket denotes an inclusive bound while parenthesis denotes an exclusive bound
 1958 // E.g. a rank 4 tensor can have an axis in range [-4, 4)
1959 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1960 return Fail("%s: Axis must be in range [-n, n)", __func__);
1961 }
1962
1963 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1964 if (!output)
1965 {
1966 return Fail("%s: Could not read output 0", __func__);
1967 }
1968
1969 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1970
1971 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001972
1973 armnn::ArgMinMaxDescriptor descriptor;
1974 descriptor.m_Function = argMinMaxFunction;
1975 descriptor.m_Axis = axis;
1976
1977 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001978
1979 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1980 {
1981 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1982 IsArgMinMaxSupported,
1983 data.m_Backends,
1984 isSupported,
1985 inputInfo0,
1986 outputInfo,
1987 descriptor);
1988 };
1989
 1990 if (IsDynamicTensor(outputInfo))
1991 {
1992 isSupported = AreDynamicTensorsSupported();
1993 }
1994 else
1995 {
1996 validateFunc(outputInfo, isSupported);
1997 }
1998
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001999 if (!isSupported)
2000 {
2001 return false;
2002 }
2003
2004 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002005 if (!layer)
2006 {
2007 return Fail("%s: Could not add the ArgMinMaxLayer", __func__);
2008 }
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00002009 input0.Connect(layer->GetInputSlot(0));
2010
Finn Williamsa4983ce2020-07-23 12:55:12 +01002011 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00002012}
2013
2014template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002015 typename HalOperation = typename HalPolicy::Operation,
2016 typename HalModel = typename HalPolicy::Model>
2017bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01002018{
Keith Davis6e4081f2020-09-03 13:17:21 +01002019 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01002020 using HalOperandType = typename HalPolicy::OperandType;
2021
2022 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
2023 if (operation.inputs.size() <= 1)
2024 {
2025 return Fail("%s: Operation has insufficient arguments", __func__);
2026 }
2027
2028 // Get inputs and outputs
2029 const std::size_t numInputTensors = operation.inputs.size() - 1;
2030
2031 int32_t concatDim;
2032 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
2033 {
2034 return Fail("%s: Operation has invalid inputs", __func__);
2035 }
2036
2037 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2038 if (!outputOperand)
2039 {
2040 return Fail("%s: Operation has no outputs", __func__);
2041 }
2042
Keith Davis6e4081f2020-09-03 13:17:21 +01002043 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2044 armnn::TensorShape outputShape = outputInfo.GetShape();
2045 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002046 //
2047 // handle negative concat dims along the lines of tensorflow as described here:
2048 // https://www.tensorflow.org/api_docs/python/tf/concat
2049 // "negative axis refers to axis + rank(values)-th dimension"
2050 //
2051 if (concatDim < 0)
2052 {
2053 concatDim += outputShape.GetNumDimensions();
2054 }
2055
2056 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2057 {
2058 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2059 }
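 // e.g. for the 4-D output of an NHWC concatenation, a concatDim of -1
 // resolves to 3, matching TensorFlow's negative-axis convention.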
2060
2061 std::vector<LayerInputHandle> inputHandles;
2062 std::vector<armnn::TensorShape> inputShapes;
2063
2064 inputHandles.reserve(numInputTensors);
2065 inputShapes.reserve(numInputTensors);
2066
Keith Davis6e4081f2020-09-03 13:17:21 +01002067 bool inputsHaveBeenReshaped = false;
2068 unsigned int tensorDimensionsAdded = 0;
Mike Kellyb8805202019-07-31 17:25:43 +01002069 for (uint32_t i = 0; i < numInputTensors; ++i)
2070 {
2071 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2072 if (!operand)
2073 {
2074 return Fail("%s: Operation has invalid inputs", __func__);
2075 }
2076
Teresa Charlin3b959602019-10-31 17:05:47 +00002077 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2078 if (!operandInputHandle.IsValid())
2079 {
2080 return Fail("%s: Operation has invalid inputs", __func__);
2081 }
Mike Kellyb8805202019-07-31 17:25:43 +01002082
Keith Davis6e4081f2020-09-03 13:17:21 +01002083 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002084 if (operandShape.GetNumDimensions() == 0)
2085 {
2086 return Fail("%s: Operands with rank 0 are not supported", __func__);
2087 }
2088
2089 if (RequiresReshape(operandShape))
2090 {
2091 inputsHaveBeenReshaped = true;
2092
2093 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2094
2095 // Expand the tensor to three dimensions
2096 if (operandShape.GetNumDimensions() == 2)
2097 {
2098 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2099 tensorDimensionsAdded = 1;
2100 }
2101 else
2102 {
2103 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2104 tensorDimensionsAdded = 2;
2105 }
2106
Kevin Mayaed08ac2019-12-12 16:33:31 +00002107 armnn::ReshapeDescriptor reshapeDescriptor;
2108 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2109
2110 bool isSupported = false;
2111 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2112 IsReshapeSupported,
2113 data.m_Backends,
2114 isSupported,
2115 operandInputHandle.GetTensorInfo(),
2116 reshapeInfo,
2117 reshapeDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002118
Kevin Mayaed08ac2019-12-12 16:33:31 +00002119 if (!isSupported)
2120 {
2121 return false;
2122 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002123 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002124
2125 // Point to the reshape operation rather then the input operation
Keith Davis6e4081f2020-09-03 13:17:21 +01002126 operandShape = reshapeInfo.GetShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002127 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2128 }
2129
2130 inputShapes.emplace_back(operandShape);
2131 inputHandles.emplace_back(operandInputHandle);
2132
2133 if (!inputHandles.back().IsValid())
2134 {
2135 return Fail("%s: Operation has invalid inputs", __func__);
2136 }
2137 }
2138
Mike Kellye2d611e2021-10-14 12:35:58 +01002139 if (inputShapes.size() != inputHandles.size())
2140 {
Mike Kelly1b46d132021-11-03 11:12:45 +00002141 return Fail("%s: invalid model: input shapes size %zu doesn't match input handles size %zu", __func__,
Mike Kellye2d611e2021-10-14 12:35:58 +01002142 inputShapes.size(), inputHandles.size());
2143 }
Mike Kellyb8805202019-07-31 17:25:43 +01002144
2145 if (inputsHaveBeenReshaped)
2146 {
2147 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2148 concatDim += tensorDimensionsAdded;
2149
2150 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2151 if (tensorDimensionsAdded == 1)
2152 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002153 if (IsDynamicTensor(outputInfo))
2154 {
2155 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2156 }
2157 else
2158 {
2159 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2160 }
Mike Kellyb8805202019-07-31 17:25:43 +01002161 }
2162 else if (tensorDimensionsAdded == 2)
2163 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002164 if (IsDynamicTensor(outputInfo))
2165 {
2166 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2167 }
2168 else
2169 {
2170 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2171 }
Mike Kellyb8805202019-07-31 17:25:43 +01002172 }
2173 }
2174
2175 // Check if permutations is required and get the pair of permutations required for the concatenation.
2176 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2177 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
Keith Davis6e4081f2020-09-03 13:17:21 +01002178 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
Keith Davis6e4081f2020-09-03 13:17:21 +01002179 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2180 concatDim,
2181 permutationPair);
Mike Kellyb8805202019-07-31 17:25:43 +01002182
Keith Davis6e4081f2020-09-03 13:17:21 +01002183 // This is only relevant for static tensors; dynamic output tensors are transposed later, as a result of shape inference from the inputs
2184 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002185 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002186 if (needPermute)
2187 {
2188 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2189 }
2190
2191 outputInfo.SetShape(outputShape);
Mike Kellyb8805202019-07-31 17:25:43 +01002192 }
Mike Kellyb8805202019-07-31 17:25:43 +01002193 // this is no-op for identity swizzles, otherwise it replaces both
2194 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002195 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002196 {
2197 return false;
2198 }
Mike Kellyb8805202019-07-31 17:25:43 +01002199
2200 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2201 armnn::OriginsDescriptor concatDescriptor;
2202
2203 try
2204 {
2205 // The concat descriptor is always created across the only supported concat dimension
2206 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002207 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2208 inputShapes.end(),
2209 concatDim);
 2210 }
 catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002211 {
2212 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2213 }
2214
2215 // Validate the output shape is correct given the input shapes based on the
2216 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002217 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002218 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002219 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2220 {
2221 return Fail("%s: Error validating the output shape for concat", __func__);
2222 }
Mike Kellyb8805202019-07-31 17:25:43 +01002223 }
2224
2225 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2226 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
Keith Davis6e4081f2020-09-03 13:17:21 +01002227 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Mike Kellyb8805202019-07-31 17:25:43 +01002228
Keith Davis6e4081f2020-09-03 13:17:21 +01002229 bool isSupported = false;
2230 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
2231 FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
2232 outputInfo, concatDescriptor);
2233 };
2234
2235 if (!isDynamicTensor)
2236 {
2237 validateFunc(outputInfo, isSupported);
2238 }
2239 else
2240 {
2241 isSupported = AreDynamicTensorsSupported();
2242 }
2243
Mike Kellyb8805202019-07-31 17:25:43 +01002244 if (!isSupported)
2245 {
2246 return false;
2247 }
2248
2249 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002250 if (!layer)
2251 {
2252 return Fail("%s: Could not add the ConcatLayer", __func__);
2253 }
Mike Kellyb8805202019-07-31 17:25:43 +01002254 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002255 // Connect inputs to the layer
2256 const int numInputSlots = layer->GetNumInputSlots();
Mike Kellye2d611e2021-10-14 12:35:58 +01002257
2258 if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
2259 {
Mike Kelly1b46d132021-11-03 11:12:45 +00002260 return Fail("%s: invalid model: input slots size %zu doesn't match input handles size %zu", __func__,
Mike Kellye2d611e2021-10-14 12:35:58 +01002261 static_cast<std::size_t>(numInputSlots), inputHandles.size());
2262 }
Mike Kellyb8805202019-07-31 17:25:43 +01002263 for (int i = 0; i < numInputSlots; ++i)
2264 {
2265 // connect the input directly to the merge (concat) layer
Mike Kelly1b46d132021-11-03 11:12:45 +00002266 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
Mike Kellyb8805202019-07-31 17:25:43 +01002267 }
2268
Keith Davis6e4081f2020-09-03 13:17:21 +01002269 // Transpose the output shape
2270 auto transposeOutputShape = [&](){
Mike Kelly4a956582020-02-28 10:32:09 +00002271 armnn::TransposeDescriptor transposeDesc;
2272 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002273 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2274 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2275 permutationPair.second);
Keith Davis6e4081f2020-09-03 13:17:21 +01002276 isSupported = false;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002277 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002278 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002279 data.m_Backends,
2280 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002281 inputTransposeInfo,
2282 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002283 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002284 if (!isSupported)
2285 {
2286 return false;
2287 }
Mike Kellyb8805202019-07-31 17:25:43 +01002288 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Keith Davis6e4081f2020-09-03 13:17:21 +01002289 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
Mike Kelly4a956582020-02-28 10:32:09 +00002290 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002291 layer = &deswizzleLayer;
Keith Davis6e4081f2020-09-03 13:17:21 +01002292
2293 return true;
2294 };
2295
 2296 if (needPermute && !isDynamicTensor)
 2297 {
 2298 if (!transposeOutputShape())
 {
 return false;
 }
Mike Kellyb8805202019-07-31 17:25:43 +01002299 }
2300
2301 if (inputsHaveBeenReshaped)
2302 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002303 if (isDynamicTensor)
2304 {
2305 // Infer the output shapes of concat if outputs are type 1 dynamic
Mike Kellye2d611e2021-10-14 12:35:58 +01002306 if (!layer->GetOutputSlot(0).IsTensorInfoSet())
2307 {
2308 return Fail("%s: TensorInfo is not set", __func__);
2309 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002310 if (!ValidateConcatOutputShape(inputShapes,
2311 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2312 concatDim))
2313 {
2314 return Fail("%s: Error validating the output shape for concat", __func__);
2315 }
 2316 if (!transposeOutputShape())
 {
 return false;
 }
2317 }
2318
Mike Kellyb8805202019-07-31 17:25:43 +01002319 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Mike Kellyb8805202019-07-31 17:25:43 +01002320 // Undo the reshape knowing the amount of dimensions added
2321 if (tensorDimensionsAdded == 1)
2322 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002323 afterConcatInfo.SetShape(
2324 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002325 }
2326 else if (tensorDimensionsAdded == 2)
2327 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002328 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002329 }
2330
Kevin Mayaed08ac2019-12-12 16:33:31 +00002331 armnn::ReshapeDescriptor reshapeDescriptor;
2332 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
Keith Davis6e4081f2020-09-03 13:17:21 +01002333 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Kevin Mayaed08ac2019-12-12 16:33:31 +00002334
Keith Davis6e4081f2020-09-03 13:17:21 +01002335 isSupported = false;
2336 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2337 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2338 IsReshapeSupported,
2339 data.m_Backends,
2340 isSupported,
2341 concatInfo,
2342 afterConcatInfo,
2343 reshapeDescriptor);
2344 };
2345
2346 if (!IsDynamicTensor(afterConcatInfo))
2347 {
2348 validateReshapeFunc(afterConcatInfo, isSupported);
2349 }
2350 else
2351 {
2352 isSupported = AreDynamicTensorsSupported();
2353 }
2354
Kevin Mayaed08ac2019-12-12 16:33:31 +00002355 if (!isSupported)
2356 {
2357 return false;
2358 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002359 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
2360 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2361 0,
2362 *layer,
2363 model,
2364 data,
2365 nullptr,
2366 validateReshapeFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002367 }
2368
Keith Davis6e4081f2020-09-03 13:17:21 +01002369 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002370}
2371
2372template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002373 typename HalOperation = typename HalPolicy::Operation,
2374 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002375bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2376{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002377 using HalOperand = typename HalPolicy::Operand;
2378 using HalOperandType = typename HalPolicy::OperandType;
2379
2380 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002381 if (!input.IsValid())
2382 {
2383 return Fail("%s: Operation has invalid inputs", __func__);
2384 }
2385
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002386 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002387 if (!output)
2388 {
2389 return Fail("%s: Could not read output 0", __func__);
2390 }
2391
2392 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002393 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002394
2395 // ArmNN does not currently support non-fixed weights or bias
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002396 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
2397 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002398
2399 if (!weightsPin.IsValid() || !biasPin.IsValid())
2400 {
2401 return Fail("%s: Operation has invalid inputs", __func__);
2402 }
2403
2404 armnn::ConstTensor weights = weightsPin.GetConstTensor();
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002405 armnn::ConstTensor bias = biasPin.GetConstTensor();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002406 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2407
2408 armnn::Convolution2dDescriptor desc;
2409 desc.m_DataLayout = armnn::DataLayout::NHWC;
2410 ActivationFn activation;
2411
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002412 if (operation.inputs.size() == 10)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002413 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002414 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2415 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2416 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2417 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2418 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2419 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002420 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002421 {
2422 return Fail("%s: Operation has invalid inputs", __func__);
2423 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01002424 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002425 else if (operation.inputs.size() == 7)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002426 {
2427 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002428 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2429 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2430 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002431 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002432 {
2433 return Fail("%s: Operation has invalid inputs", __func__);
2434 }
2435
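 // ANEURALNETWORKS_CONV_2D filters are laid out as
 // [ depth_out, filter_height, filter_width, depth_in ],
 // so indices 1 and 2 give the kernel height and width respectively.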
2436 const uint32_t kernelX = weights.GetShape()[2];
2437 const uint32_t kernelY = weights.GetShape()[1];
2438 const uint32_t inputX = inputInfo.GetShape()[2];
2439 const uint32_t inputY = inputInfo.GetShape()[1];
2440
2441 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2442 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002443 }
2444 else
2445 {
2446 return Fail("%s: Unsupported number of operation inputs", __func__);
2447 }
2448
2449 desc.m_BiasEnabled = true;
2450 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2451
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002452 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002453 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2454 {
2455 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2456 IsConvolution2dSupported,
2457 data.m_Backends,
2458 isSupported,
2459 inputInfo,
2460 outputInfo,
2461 desc,
2462 weights.GetInfo(),
2463 biases);
2464 };
2465
 2466 if (!IsDynamicTensor(outputInfo))
2467 {
2468 validateFunc(outputInfo, isSupported);
2469 }
2470 else
2471 {
2472 isSupported = AreDynamicTensorsSupported();
2473 }
2474
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002475 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002476 {
2477 return false;
2478 }
2479
2480 armnn::IConnectableLayer* startLayer =
2481 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2482
2483 if (!startLayer)
2484 {
2485 return Fail("%s: AddConvolution2dLayer failed", __func__);
2486 }
2487
Mike Kellyb5fdf382019-06-11 16:35:25 +01002488 input.Connect(startLayer->GetInputSlot(0));
2489
Kevin Mayfcf2a152020-09-08 16:06:32 +01002490 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2491 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002492}
2493
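// For reference, the implicit-padding branch above relies on CalcPadding(),
// which is expected to follow the NNAPI scheme: with PADDING_SAME the total
// pad per axis is
//     max((ceil(inputSize / stride) - 1) * stride + kernelSize - inputSize, 0)
// split between the begin and end pads, while PADDING_VALID uses zero padding.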
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002494template<typename HalPolicy,
2495 typename HalOperation = typename HalPolicy::Operation,
2496 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002497bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2498{
2499 using HalOperand = typename HalPolicy::Operand;
2500 using HalOperandType = typename HalPolicy::OperandType;
2501
2502 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2503 if (!input.IsValid() )
2504 {
2505 return Fail("%s: Operation has invalid inputs", __func__);
2506 }
2507
2508 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2509 unsigned int rank = inputInfo.GetNumDimensions();
2510 if (rank != 4)
2511 {
2512 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2513 }
2514
2515 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2516 if (!output)
2517 {
2518 return Fail("%s: Could not read output 0", __func__);
2519 }
2520
2521 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002522
2523 armnn::DepthToSpaceDescriptor descriptor;
2524
 2525 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data))
 {
 return Fail("%s: Failed to read block size operand", __func__);
 }
 2526 if (descriptor.m_BlockSize <= 1)
 2527 {
 2528 return Fail("%s: Block size must be greater than 1", __func__);
 2529 }
2530
2531 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002532 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002533 {
2534 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2535 }
2536
2537 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002538 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2539 {
2540 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2541 IsDepthToSpaceSupported,
2542 data.m_Backends,
2543 isSupported,
2544 inputInfo,
2545 outputInfo,
2546 descriptor);
2547 };
2548
 2549 if (!IsDynamicTensor(outputInfo))
2550 {
2551 validateFunc(outputInfo, isSupported);
2552 }
2553 else
2554 {
2555 isSupported = AreDynamicTensorsSupported();
2556 }
2557
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002558 if (!isSupported)
2559 {
2560 return false;
2561 }
2562
2563 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01002564 if (!layer)
2565 {
2566 return Fail("%s: Could not add the DepthToSpaceLayer", __func__);
2567 }
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002568 input.Connect(layer->GetInputSlot(0));
2569
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002570 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002571}
2572
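// Shape sketch for the conversion above: with m_BlockSize == 2 and NHWC data,
// DEPTH_TO_SPACE maps [ N, H, W, C ] to [ N, 2*H, 2*W, C/4 ], so the input
// depth must be divisible by blockSize * blockSize.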
2573template<typename HalPolicy,
2574 typename HalOperation = typename HalPolicy::Operation,
2575 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002576bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2577{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002578 using HalOperand = typename HalPolicy::Operand;
2579 using HalOperandType = typename HalPolicy::OperandType;
2580
2581 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002582
2583 if (!input.IsValid())
2584 {
2585 return Fail("%s: Operation has invalid inputs", __func__);
2586 }
2587
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002588 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002589
2590 if (!output)
2591 {
2592 return Fail("%s: Could not read output 0", __func__);
2593 }
2594
2595 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002596 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002597
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002598 // ArmNN does not currently support non-fixed weights or bias
Mike Kellyb5fdf382019-06-11 16:35:25 +01002599 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002600 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002601
2602 if (weightsOperand == nullptr)
2603 {
2604 return Fail("%s: Operand is invalid", __func__);
2605 }
Colm Donelanccfeb5e2021-03-30 15:30:13 +01002606 // Basic sanity check on the weights shape.
2607 // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
2608 // [1, filter_height, filter_width, depth_out]
2609 if (weightsOperand->dimensions[0] != 1)
2610 {
2611 return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
2612 }
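
    // Illustrative note (not in the original source): with an input of depth I and a depth
    // multiplier M, AndroidNN packs the depthwise filter as [1, filter_height, filter_width, I * M].
    // E.g. I = 3, M = 2 and a 3x3 kernel give a weights shape of [1, 3, 3, 6].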
2613
Mike Kellyb5fdf382019-06-11 16:35:25 +01002614 armnn::DepthwiseConvolution2dDescriptor desc;
2615 desc.m_DataLayout = armnn::DataLayout::NHWC;
2616
Jan Eilersa20d2b82021-04-27 09:21:08 +01002617 // The layout for weights in depthwise is [ 1, H, W, O] and it's the same in ArmNN. No need to permute anything.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002618 const ConstTensorPin weightsPin =
2619 ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
2620 1,
2621 model,
Jan Eilersa20d2b82021-04-27 09:21:08 +01002622 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002623
2624 // Bias is a 1D tensor
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002625 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002626
2627 if (!weightsPin.IsValid() || !biasPin.IsValid())
2628 {
2629 return Fail("%s: Operation has invalid inputs", __func__);
2630 }
2631
2632 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2633 armnn::ConstTensor bias = biasPin.GetConstTensor();
2634 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
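    // Note (illustrative, not from the original source): for quantized convolutions the bias is
    // expected to be quantized with scale(bias) == scale(input) * scale(weights) and zero point 0;
    // SanitizeBiasQuantizationScale nudges the bias scale onto that value when it is within
    // floating-point tolerance. E.g. input scale 0.5 and weights scale 0.25 imply a bias scale of 0.125.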
2635
2636 ActivationFn activation;
2637
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002638 if (operation.inputs.size() == 11)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002639 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002640 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2641 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2642 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2643 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2644 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2645 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002646 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002647 {
2648 return Fail("%s: Operation has invalid inputs", __func__);
2649 }
2650 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002651 else if (operation.inputs.size() == 8)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002652 {
2653 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002654 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2655 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2656 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002657 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002658 {
2659 return Fail("%s: Operation has invalid inputs", __func__);
2660 }
2661
Jan Eilersa20d2b82021-04-27 09:21:08 +01002662 const uint32_t kernelX = weights.GetShape()[2];
2663 const uint32_t kernelY = weights.GetShape()[1];
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002664 const uint32_t inputX = inputInfo.GetShape()[2];
2665 const uint32_t inputY = inputInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002666
2667 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2668 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
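
        // Illustrative note (not in the original source): for SAME padding the total pad along a
        // dimension is
        //   padTotal = max((ceil(inSize / stride) - 1) * stride + kernelSize - inSize, 0)
        // split between the begin/end pads. E.g. inputX = 5, kernelX = 3, strideX = 2 gives
        // outSize = 3 and padTotal = 2, i.e. padLeft = 1 and padRight = 1.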
2669 }
2670 else
2671 {
2672 return Fail("%s: Unsupported number of operation inputs", __func__);
2673 }
2674
2675 desc.m_BiasEnabled = true;
2676 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2677
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002678 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002679 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2680 {
2681 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2682 IsDepthwiseConvolutionSupported,
2683 data.m_Backends,
2684 isSupported,
2685 inputInfo,
2686 outputInfo,
2687 desc,
2688 weights.GetInfo(),
2689 biases);
2690 };
2691
2692     if (!IsDynamicTensor(outputInfo))
2693 {
2694 validateFunc(outputInfo, isSupported);
2695 }
2696 else
2697 {
2698 isSupported = AreDynamicTensorsSupported();
2699 }
2700
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002702 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002703 {
2704 return false;
2705 }
2706
2707 armnn::IConnectableLayer* startLayer =
2708 data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2709 if (!startLayer)
2710 {
2711 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
2712 }
2713
Mike Kellyb5fdf382019-06-11 16:35:25 +01002714 input.Connect(startLayer->GetInputSlot(0));
2715
Kevin Mayfcf2a152020-09-08 16:06:32 +01002716 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2717 data, nullptr, validateFunc, activation);
arovir01b0717b52018-09-05 17:03:25 +01002718}
2719
Mike Kelly3c673942019-07-25 09:26:06 +01002720template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002721 typename HalOperation = typename HalPolicy::Operation,
2722 typename HalModel = typename HalPolicy::Model>
2723bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002724{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002725 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002726
2727 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2728 if (!input.IsValid())
2729 {
2730 return Fail("%s: Operation has invalid input", __func__);
2731 }
2732
Sadik Armagan98c0f662019-11-21 15:54:36 +00002733 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2734 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2735 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2736 {
2737 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2738 }
2739
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002740 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002741 if (!outputOperand)
2742 {
2743 return Fail("%s: Operation has invalid outputs", __func__);
2744 }
2745
2746 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002747
2748 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002749 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2750 {
2751 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2752 IsDequantizeSupported,
2753 data.m_Backends,
2754 isSupported,
2755 inputInfo,
2756 outputInfo);
2757 };
2758
2759     if (IsDynamicTensor(outputInfo))
2760 {
2761 isSupported = AreDynamicTensorsSupported();
2762 }
2763 else
2764 {
2765 validateFunc(outputInfo, isSupported);
2766 }
2767
Mike Kelly46272802019-08-14 17:00:48 +01002768 if (!isSupported)
2769 {
2770 return false;
2771 }
2772
2773 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
Mike Kellye2d611e2021-10-14 12:35:58 +01002774 if (!layer)
2775 {
2776 return Fail("%s: Could not add the DequantizeLayer", __func__);
2777 }
Mike Kelly46272802019-08-14 17:00:48 +01002778 input.Connect(layer->GetInputSlot(0));
2779
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002780 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002781}
2782
2783template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002784 typename HalOperation = typename HalPolicy::Operation,
2785 typename HalModel = typename HalPolicy::Model>
2786bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002787{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002788 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002789
2790 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2791 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2792
2793 if (!input0.IsValid() || !input1.IsValid())
2794 {
2795 return Fail("%s: Operation has invalid inputs", __func__);
2796 }
2797
2798 // The FuseActivation parameter is always the input index 2
2799 // and it should be optional
2800 ActivationFn activationFunction;
2801 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2802 {
2803 return Fail("%s: Operation has invalid inputs", __func__);
2804 }
2805
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002806 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002807 if (!output)
2808 {
2809 return Fail("%s: Could not read output 0", __func__);
2810 }
2811
2812 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002813
2814 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002815 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2816 {
2817 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2818 IsDivisionSupported,
2819 data.m_Backends,
2820 isSupported,
2821 input0.GetTensorInfo(),
2822 input1.GetTensorInfo(),
2823 outputInfo);
2824 };
2825
2826     if (!IsDynamicTensor(outputInfo))
2827 {
2828 validateFunc(outputInfo, isSupported);
2829 }
2830 else
2831 {
2832 isSupported = AreDynamicTensorsSupported();
2833 }
2834
Mike Kelly46272802019-08-14 17:00:48 +01002835 if (!isSupported)
2836 {
2837 return false;
2838 }
2839
2840     armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    if (!startLayer)
    {
        return Fail("%s: Could not add the DivisionLayer", __func__);
    }
Mike Kelly46272802019-08-14 17:00:48 +01002841
Kevin Mayfcf2a152020-09-08 16:06:32 +01002842 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2843 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01002844 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01002845 return false;
Mike Kelly46272802019-08-14 17:00:48 +01002846 }
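
    // Illustrative note (not in the original source): BroadcastTensor inserts a ReshapeLayer in
    // front of the lower-rank input so both inputs have equal rank before the elementwise layer.
    // E.g. dividing a [4, 32] tensor by a [32] tensor reshapes the second input to [1, 32],
    // after which per-dimension broadcasting applies.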
Kevin Mayfcf2a152020-09-08 16:06:32 +01002847
2848 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2849 data, nullptr, validateFunc, activationFunction);
2850
Mike Kelly46272802019-08-14 17:00:48 +01002851}
2852
2853template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002854 typename HalOperation = typename HalPolicy::Operation,
2855 typename HalModel = typename HalPolicy::Model>
2856bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002857{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002858 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002859
2860 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2861 if (!input.IsValid())
2862 {
2863 return Fail("%s: Operation has invalid inputs", __func__);
2864 }
2865
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002866 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002867 if (!outputOperand)
2868 {
2869 return Fail("%s: Operation has invalid outputs", __func__);
2870 }
2871
2872 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002873
2874 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002875 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2876 {
2877 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2878 IsFloorSupported,
2879 data.m_Backends,
2880 isSupported,
2881 input.GetTensorInfo(),
2882 outputInfo);
2883 };
2884
2885     if (!IsDynamicTensor(outputInfo))
2886 {
2887 validateFunc(outputInfo, isSupported);
2888 }
2889 else
2890 {
2891 isSupported = AreDynamicTensorsSupported();
2892 }
2893
Mike Kelly46272802019-08-14 17:00:48 +01002894 if (!isSupported)
2895 {
2896 return false;
2897 }
2898
2899 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
Mike Kellye2d611e2021-10-14 12:35:58 +01002900 if (!layer)
2901 {
2902 return Fail("%s: Could not add the FloorLayer", __func__);
2903 }
Mike Kelly46272802019-08-14 17:00:48 +01002904 input.Connect(layer->GetInputSlot(0));
2905
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002906 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002907}
2908
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002909inline bool IsQSymm8(const V1_0::Operand&)
2910{
2911 return false;
2912}
2913
Kevin May42477c12020-03-26 13:34:14 +00002914#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002915
2916inline bool IsQSymm8(const V1_2::Operand& operand)
2917{
2918 return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
2919}
2920
2921#endif
2922
Kevin May42477c12020-03-26 13:34:14 +00002923#ifdef ARMNN_ANDROID_NN_V1_3
2924
2925inline bool IsQSymm8(const V1_3::Operand& operand)
2926{
2927 return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
2928}
2929
2930#endif
2931
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002932enum class DequantizeStatus
2933{
2934 SUCCESS,
2935 NOT_REQUIRED,
2936 INVALID_OPERAND
2937};
2938
2939using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2940
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002941template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002942 typename HalOperation = typename HalPolicy::Operation,
2943 typename HalModel = typename HalPolicy::Model>
2944DequantizeResult DequantizeIfRequired(size_t operand_index,
2945 const HalOperation& operation,
2946 const HalModel& model,
2947 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002948{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002949 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002950
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002951 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002952 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002953 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002954 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002955 }
2956
2957 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2958 {
2959 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002960 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002961 }
2962
2963 const size_t weightsInputIndex = operation.inputs[operand_index];
2964
2965     // The weights are a non-const tensor; this indicates they might be the output of a DEQUANTIZE op.
2966 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002967 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002968 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002969 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002970 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002971 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2972 {
2973 continue;
2974 }
2975
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002976 size_t outOpIndex = weightsInputIndex + 1;
2977 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002978 {
2979 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002980 }
2981
2982 if (outOpIndex != weightsInputIndex)
2983 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002984 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002985 }
2986
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002987 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Mike Kellye2d611e2021-10-14 12:35:58 +01002988
2989 if (!operand)
2990 {
2991 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
2992 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002993
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002994 if (!IsQSymm8(*operand))
2995 {
2996 // Only supporting dequantize from QSYMM8 to FLOAT
2997 break;
2998 }
2999
3000 // Allocate a new buffer for the dequantized data and manually dequantize
3001 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
3002 if (!startValue)
3003 {
3004 // Failed to get the operand address
3005 break;
3006 }
3007
3008     const int8_t* quantizedBuffer = reinterpret_cast<const int8_t*>(startValue);
3009 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003010 const float quantizationScale = operand->scale;
3011
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003012 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
    float* dstPtr = dequantizedBuffer.get();
    if (!dstPtr)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }
    for (size_t i = 0; i < dequantizedBufferLength; ++i)
    {
        dstPtr[i] = quantizedBuffer[i] * quantizationScale;
    }
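    // Illustrative note (not in the original source): TENSOR_QUANT8_SYMM uses a zero point of 0,
    // so the dequantized value is simply quantized * scale. E.g. with scale = 0.05f, a stored
    // int8 value of -20 dequantizes to -1.0f.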
3023
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00003024 // Construct tensor info for dequantized ConstTensor
3025 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
3026 operand->dimensions.data(),
3027 armnn::DataType::Float32);
3028
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003029 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
3030 std::move(tensorInfo),
3031 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003032 }
3033
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003034     return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003035}
3036
3037template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003038 typename HalOperation = typename HalPolicy::Operation,
3039 typename HalModel = typename HalPolicy::Model>
3040ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
3041 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003042 const ConversionData& data,
3043 size_t operandIndex,
3044 bool optional = false)
3045{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003046     DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex, operation, model, data);
3047
3048 DequantizeStatus status = std::get<3>(dequantized);
3049 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003050 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003051 case DequantizeStatus::INVALID_OPERAND:
3052 {
3053 // return invalid const tensor pin
3054 return ConstTensorPin();
3055 }
3056 case DequantizeStatus::NOT_REQUIRED:
3057 {
3058 return ConvertOperationInputToConstTensorPin<HalPolicy>(
3059 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3060 }
3061 case DequantizeStatus::SUCCESS:
3062 default:
3063 {
3064 return ConstTensorPin(
3065 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3066 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003067 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003068}
3069
Mike Kelly46272802019-08-14 17:00:48 +01003071template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003072 typename HalOperation = typename HalPolicy::Operation,
3073 typename HalModel = typename HalPolicy::Model>
3074bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003075{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003076 using HalOperand = typename HalPolicy::Operand;
3077
Mike Kelly46272802019-08-14 17:00:48 +01003078 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3079 if (!input.IsValid())
3080 {
3081 return Fail("%s: Operation has invalid inputs", __func__);
3082 }
3083
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003084 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003085 if (!output)
3086 {
3087 return Fail("%s: Could not read output 0", __func__);
3088 }
3089
3090 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3091 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3092
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003093 LayerInputHandle weightsInput = LayerInputHandle();
3094 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3095 if (!weightsOperand)
Mike Kelly46272802019-08-14 17:00:48 +01003096 {
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003097 return Fail("%s: Could not read weights", __func__);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003098 }
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003099
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003100 // If weights are constant a separate constant layer will be created to store data.
3101 // Otherwise handle non const weights as inputs.
3102 weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3103 if (!weightsInput.IsValid())
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003104 {
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003105 return Fail("%s: Operation has invalid inputs", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003106 }
3107
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003108 LayerInputHandle biasInput = LayerInputHandle();
3109 const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3110 if (!biasOperand)
3111 {
3112 return Fail("%s: Could not read bias", __func__);
3113 }
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003114
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003115 // If bias are constant a separate constant layer will be created to store data.
3116 // Otherwise handle non const bias as inputs.
3117 biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
3118 if (!biasInput.IsValid())
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003119 {
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003120 return Fail("%s: Operation has invalid inputs", __func__);
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003121 }
3122
Matthew Sloyan56c249c2021-08-09 12:49:23 +01003123 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
Mike Kelly46272802019-08-14 17:00:48 +01003124 armnn::TensorInfo reshapedInfo = inputInfo;
Mike Kelly46272802019-08-14 17:00:48 +01003125 try
3126 {
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003127 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003128 }
3129 catch (const std::exception& e)
3130 {
Mike Kelly46272802019-08-14 17:00:48 +01003131 return Fail("%s: %s", __func__, e.what());
3132 }
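
    // Illustrative note (not in the original source): FlattenFullyConnectedInput collapses the
    // input to 2D as [batchSize, inputSize], where inputSize = weightsShape[1] and
    // batchSize = totalElements / inputSize. E.g. an input of shape [1, 4, 5, 1] with weights
    // of shape [8, 20] is reshaped to [1, 20].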
3133
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003134     // Ensure the bias quantization scale is within 1% of inputScale * weightsScale (small float differences can exist)
Matthew Sloyan56c249c2021-08-09 12:49:23 +01003135 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003136 SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
Mike Kelly46272802019-08-14 17:00:48 +01003137
3138 ActivationFn activationFunction;
3139 if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
3140 {
3141 return Fail("%s: Operation has invalid inputs", __func__);
3142 }
3143
3144 armnn::FullyConnectedDescriptor desc;
3145 desc.m_TransposeWeightMatrix = true;
3146 desc.m_BiasEnabled = true;
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003147 desc.m_ConstantWeights = IsOperandConstant<HalPolicy>(*weightsOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003148
3149 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003150 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3151 {
Finn Williams49184462020-10-02 13:28:34 +01003152 if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003153 weightsInfo.GetShape(),
Finn Williams49184462020-10-02 13:28:34 +01003154 outputInfo.GetShape(),
3155 desc.m_TransposeWeightMatrix))
3156 {
3157 isSupported = false;
3158 Fail("%s: Expected outputShape does not match actual outputShape", __func__);
3159 return;
3160 }
3161
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003162 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003163 IsFullyConnectedSupported,
3164 data.m_Backends,
3165 isSupported,
3166 reshapedInfo,
3167 outputInfo,
3168 weightsInfo,
3169 biasInfo,
3170 desc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003171 };
3172
3173     if (!IsDynamicTensor(outputInfo))
3174 {
3175 validateFunc(outputInfo, isSupported);
3176 }
3177 else
3178 {
3179 isSupported = AreDynamicTensorsSupported();
3180 }
3181
Mike Kelly46272802019-08-14 17:00:48 +01003182 if (!isSupported)
3183 {
3184 return false;
3185 }
3186
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003187 // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
3188     armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
    if (!startLayer)
    {
        return Fail("%s: Could not add the FullyConnectedLayer", __func__);
    }
Mike Kelly46272802019-08-14 17:00:48 +01003189
Kevin Mayfcf2a152020-09-08 16:06:32 +01003190 if (inputInfo.GetNumDimensions() > 2U)
Mike Kelly46272802019-08-14 17:00:48 +01003191 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003192 armnn::ReshapeDescriptor reshapeDescriptor;
3193 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
Mike Kelly46272802019-08-14 17:00:48 +01003194
Kevin Mayfcf2a152020-09-08 16:06:32 +01003195 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003196 if (!reshapeLayer)
3197 {
3198 return Fail("%s: could not add the reshapeLayer", __func__);
3199 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003200 input.Connect(reshapeLayer->GetInputSlot(0));
3201 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
3202 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
Mike Kelly46272802019-08-14 17:00:48 +01003203 }
3204 else
3205 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003206 input.Connect(startLayer->GetInputSlot(0));
Mike Kelly46272802019-08-14 17:00:48 +01003207 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003208
Matthew Sloyan29cc9612021-07-16 10:21:12 +01003209 // Connect weights and bias inputs
3210 weightsInput.Connect(startLayer->GetInputSlot(1));
3211 biasInput.Connect(startLayer->GetInputSlot(2));
Sadik Armagan2e4a24a2021-03-18 13:59:40 +00003212
Kevin Mayfcf2a152020-09-08 16:06:32 +01003213 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3214 data, nullptr, validateFunc, activationFunction);
Mike Kelly46272802019-08-14 17:00:48 +01003215}
3216
3217template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003218 typename HalOperation = typename HalPolicy::Operation,
3219 typename HalModel = typename HalPolicy::Model>
3220bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003221{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003222 using HalOperand = typename HalPolicy::Operand;
3223
Mike Kelly999e2092019-08-15 10:46:46 +01003224 if (operation.inputs.size() != 1)
3225 {
3226 return Fail("%s: Optional inputs are not supported", __func__);
3227 }
3228
Mike Kelly46272802019-08-14 17:00:48 +01003229 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3230 if (!input.IsValid())
3231 {
3232 return Fail("%s: Operation has invalid inputs", __func__);
3233 }
3234
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003235 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003236 if (!output)
3237 {
3238 return Fail("%s: Could not read output 0", __func__);
3239 }
3240
3241 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3242 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3243
Mike Kelly46272802019-08-14 17:00:48 +01003244 if (outputInfo.GetNumDimensions() != 4u)
3245 {
3246 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3247 }
3248
3249 armnn::L2NormalizationDescriptor desc;
3250 desc.m_DataLayout = armnn::DataLayout::NHWC;
3251
3252 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003253 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3254 {
3255 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3256 IsL2NormalizationSupported,
3257 data.m_Backends,
3258 isSupported,
3259 inputInfo,
3260 outputInfo,
3261 desc);
3262 };
3263
3264     if (!IsDynamicTensor(outputInfo))
3265 {
3266 validateFunc(outputInfo, isSupported);
3267 }
3268 else
3269 {
3270 isSupported = AreDynamicTensorsSupported();
3271 }
3272
Mike Kelly46272802019-08-14 17:00:48 +01003273 if (!isSupported)
3274 {
3275 return false;
3276 }
3277
3278 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
Mike Kellye2d611e2021-10-14 12:35:58 +01003279 if (!layer)
3280 {
3281 return Fail("%s: Could not add the L2NormalizationLayer", __func__);
3282 }
Mike Kelly46272802019-08-14 17:00:48 +01003283 input.Connect(layer->GetInputSlot(0));
3284
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003285 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003286}
3287
3288template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003289 typename HalOperation = typename HalPolicy::Operation,
3290 typename HalModel = typename HalPolicy::Model>
3291bool ConvertLocalResponseNormalization(const HalOperation& operation,
3292 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003293 ConversionData& data)
3294{
Mike Kelly999e2092019-08-15 10:46:46 +01003295 if (operation.inputs.size() != 5)
3296 {
3297 return Fail("%s: Optional inputs are not supported", __func__);
3298 }
3299
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003300 using HalOperand = typename HalPolicy::Operand;
3301 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003302
3303 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3304 if (!input.IsValid())
3305 {
3306 return Fail("%s: Operation has invalid inputs", __func__);
3307 }
3308
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003309 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003310 if (!output)
3311 {
3312 return Fail("%s: Could not read output 0", __func__);
3313 }
3314
3315 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3316 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3317
Mike Kelly46272802019-08-14 17:00:48 +01003318 if (outputInfo.GetNumDimensions() != 4u)
3319 {
3320 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3321 }
3322
3323 armnn::NormalizationDescriptor descriptor;
3324 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3325 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3326 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3327
3328 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003329 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003330 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3331 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3332 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3333 {
3334 return Fail("%s: Operation has invalid inputs", __func__);
3335 }
3336
3337 // ArmNN expects normSize to be the full size of the normalization
3338 // window rather than the radius as in AndroidNN.
3339 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
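    // E.g. an AndroidNN radius of 2 becomes an ArmNN normSize of 5 (two neighbours on each
    // side plus the centre element).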
3340
3341 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003342 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3343 {
3344 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3345 IsNormalizationSupported,
3346 data.m_Backends,
3347 isSupported,
3348 inputInfo,
3349 outputInfo,
3350 descriptor);
3351 };
3352
3353     if (!IsDynamicTensor(outputInfo))
3354 {
3355 validateFunc(outputInfo, isSupported);
3356 }
3357 else
3358 {
3359 isSupported = AreDynamicTensorsSupported();
3360 }
3361
Mike Kelly46272802019-08-14 17:00:48 +01003362 if (!isSupported)
3363 {
3364 return false;
3365 }
3366
Mike Kelly46272802019-08-14 17:00:48 +01003367 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003368 if (!layer)
3369 {
3370 return Fail("%s: Could not add the NormalizationLayer", __func__);
3371 }
Mike Kelly46272802019-08-14 17:00:48 +01003372 input.Connect(layer->GetInputSlot(0));
3373
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003374 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003375}
3376
3377template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003378 typename HalOperation = typename HalPolicy::Operation,
3379 typename HalModel = typename HalPolicy::Model>
3380bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003381{
Mike Kelly46272802019-08-14 17:00:48 +01003382 armnn::ActivationDescriptor desc;
3383 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3384
3385 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3386}
3387
3388template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003389 typename HalOperation = typename HalPolicy::Operation,
3390 typename HalModel = typename HalPolicy::Model>
3391bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003392{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003393 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003394
3395 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3396 if (!input.IsValid())
3397 {
3398 return Fail("%s: Operation has invalid inputs", __func__);
3399 }
3400
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003401 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003402 if (!output)
3403 {
3404 return Fail("%s: Could not read output 0", __func__);
3405 }
3406
3407 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003408
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003409 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003410 if (!axisOperand)
3411 {
3412 return Fail("%s: Could not read input 1", __func__);
3413 }
3414
3415 std::vector<int32_t> axis;
3416 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3417 {
3418 return Fail("%s: Input 1 has invalid values", __func__);
3419 }
3420
3421 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3422
3423 // Convert the axis to unsigned int and remove duplicates.
3424 unsigned int rank = inputInfo.GetNumDimensions();
3425 std::set<unsigned int> uniqueAxis;
3426 std::transform(axis.begin(), axis.end(),
3427 std::inserter(uniqueAxis, uniqueAxis.begin()),
3428 [rank](int i) -> unsigned int { return (i + rank) % rank; });
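    // Illustrative note (not in the original source): negative axes count from the back, so for
    // a rank-4 input the axis list { -1, 3 } maps to { 3 } after the (i + rank) % rank transform
    // and de-duplication.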
3429
3430 // Get the "keep dims" flag.
3431 int32_t keepDims = 0;
3432 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3433 {
3434 return Fail("%s: Could not read input 2", __func__);
3435 }
3436
3437 armnn::MeanDescriptor descriptor;
3438 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3439 descriptor.m_KeepDims = keepDims > 0;
3440
3441 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003442 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3443 {
3444 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3445 IsMeanSupported,
3446 data.m_Backends,
3447 isSupported,
3448 inputInfo,
3449 outputInfo,
3450 descriptor);
3451 };
3452
3453     if (!IsDynamicTensor(outputInfo))
3454 {
3455 validateFunc(outputInfo, isSupported);
3456 }
3457 else
3458 {
3459 isSupported = AreDynamicTensorsSupported();
3460 }
3461
Mike Kelly46272802019-08-14 17:00:48 +01003462 if (!isSupported)
3463 {
3464 return false;
3465 }
3466
3467 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003468 if (!layer)
3469 {
3470 return Fail("%s: Could not add the MeanLayer", __func__);
3471 }
Mike Kelly46272802019-08-14 17:00:48 +01003472 input.Connect(layer->GetInputSlot(0));
3473
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003474 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003475}
3476
3477template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003478 typename HalOperation = typename HalPolicy::Operation,
3479 typename HalModel = typename HalPolicy::Model>
3480bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003481{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003482 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003483
3484 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3485 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3486
3487 if (!input0.IsValid() || !input1.IsValid())
3488 {
3489 return Fail("%s: Operation has invalid inputs", __func__);
3490 }
3491
3492 // The FuseActivation parameter is always the input index 2
3493 // and it should be optional
3494 ActivationFn activationFunction;
3495 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3496 {
3497 return Fail("%s: Operation has invalid inputs", __func__);
3498 }
3499
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003500 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003501
3502 if (outputOperand == nullptr)
3503 {
3504 return false;
3505 }
3506
3507 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003508
3509 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003510 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3511 {
3512 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3513 IsMultiplicationSupported,
3514 data.m_Backends,
3515 isSupported,
3516 input0.GetTensorInfo(),
3517 input1.GetTensorInfo(),
3518 outputInfo);
3519 };
3520
3521     if (!IsDynamicTensor(outputInfo))
3522 {
3523 validateFunc(outputInfo, isSupported);
3524 }
3525 else
3526 {
3527 isSupported = AreDynamicTensorsSupported();
3528 }
3529
Mike Kelly46272802019-08-14 17:00:48 +01003530 if (!isSupported)
3531 {
3532 return false;
3533 }
3534
3535     armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
    if (!startLayer)
    {
        return Fail("%s: Could not add the MultiplicationLayer", __func__);
    }
Mike Kelly46272802019-08-14 17:00:48 +01003536
Kevin Mayfcf2a152020-09-08 16:06:32 +01003537 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3538 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01003539 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003540 return false;
3541 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01003542
Kevin Mayfcf2a152020-09-08 16:06:32 +01003543 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3544 data, nullptr, validateFunc, activationFunction);
Mike Kelly46272802019-08-14 17:00:48 +01003545}
3546
3547template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003548 typename HalOperation = typename HalPolicy::Operation,
3549 typename HalModel = typename HalPolicy::Model>
3550bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003551{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003552 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003553
Mike Kelly3c673942019-07-25 09:26:06 +01003554 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3555 if (!input.IsValid())
3556 {
3557 return Fail("%s: Operation has invalid inputs", __func__);
3558 }
3559
3560 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3561 unsigned int rank = inputInfo.GetNumDimensions();
3562
3563 armnn::PadDescriptor descriptor;
3564 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3565 {
3566 return Fail("%s: Could not convert paddings", __func__);
3567 }
3568
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003569     // For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensors,
3570     // the scale and zeroPoint of the output must be the same as input0's
Mike Kelly3c673942019-07-25 09:26:06 +01003571 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3572 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3573 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003574 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003575 {
3576 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3577 }
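
    // E.g. with zeroPoint = 128 and scale = 0.25 the pad value 128 dequantizes to
    // (128 - 128) * 0.25 = 0.0f, i.e. a logical zero.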
3578
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003579 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003580 if (!output)
3581 {
3582 return Fail("%s: Could not read output", __func__);
3583 }
3584
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003585 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003586
3587 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003588 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3589 {
3590 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3591 IsPadSupported,
3592 data.m_Backends,
3593 isSupported,
3594 inputInfo,
3595 outputInfo,
3596 descriptor);
3597 };
3598
3599     if (!IsDynamicTensor(outputInfo))
3600 {
3601 validateFunc(outputInfo, isSupported);
3602 }
3603 else
3604 {
3605 isSupported = AreDynamicTensorsSupported();
3606 }
3607
Mike Kelly3c673942019-07-25 09:26:06 +01003608 if (!isSupported)
3609 {
3610 return false;
3611 }
3612
3613 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003614 if (!layer)
3615 {
3616 return Fail("%s: Could not add the PadLayer", __func__);
3617 }
Mike Kelly3c673942019-07-25 09:26:06 +01003618 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003619
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003620 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003621}
3622
Mike Kelly0a879362019-07-29 16:56:31 +01003623template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003624 typename HalOperation = typename HalPolicy::Operation,
3625 typename HalModel = typename HalPolicy::Model>
3626bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003627{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003628 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003629
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003630 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3631 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3632 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003633
3634 if (inputOperand == nullptr
3635 || requestedShapeOperand == nullptr
3636 || outputOperand == nullptr)
3637 {
3638 return Fail("%s: Operation has invalid inputs", __func__);
3639 }
3640
3641 if (requestedShapeOperand->dimensions.size() != 1)
3642 {
3643 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3644 __func__, requestedShapeOperand->dimensions.size());
3645 }
3646
3647 std::vector<int32_t> targetDimensions;
3648 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3649 {
3650 return Fail("%s: Could not read values of input 1", __func__);
3651 }
3652
3653 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3654
3655 Shape requestedShape;
3656 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3657 // function that resolves these values into a fully specified tensor shape.
3658 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3659 {
3660 return Fail("%s: Failed to resolve the requested shape", __func__);
3661 }
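
    // Illustrative note (not in the original source): a single -1 in the requested shape is
    // resolved from the element count. E.g. an input of shape [2, 3, 4] with target
    // dimensions [-1, 6] resolves to a requested shape of [4, 6].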
3662
Mike Kelly46272802019-08-14 17:00:48 +01003663 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3664 if (!input.IsValid())
3665 {
3666 return Fail("%s: Could not read input 0", __func__);
3667 }
3668
3669 armnn::ReshapeDescriptor reshapeDescriptor;
3670 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3671 requestedShape.dimensions.data());
3672
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003673 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3674
Mike Kelly46272802019-08-14 17:00:48 +01003675 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003676 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3677 {
3678 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3679 IsReshapeSupported,
3680 data.m_Backends,
3681 isSupported,
3682 input.GetTensorInfo(),
3683 outputInfo,
3684 reshapeDescriptor);
3685 };
3686
3687     if (!IsDynamicTensor(outputInfo))
3688 {
3689 validateFunc(outputInfo, isSupported);
3690 }
3691 else
3692 {
3693 isSupported = AreDynamicTensorsSupported();
3694 }
3695
Mike Kelly46272802019-08-14 17:00:48 +01003696 if (!isSupported)
3697 {
3698 return false;
3699 }
3700
3701 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Mike Kellye2d611e2021-10-14 12:35:58 +01003702 if (!layer)
3703 {
3704 return Fail("%s: Could not add the ReshapeLayer", __func__);
3705 }
Mike Kelly46272802019-08-14 17:00:48 +01003706 input.Connect(layer->GetInputSlot(0));
3707
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003708 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003709}
3710
3711template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003712 typename HalOperation = typename HalPolicy::Operation,
3713 typename HalModel = typename HalPolicy::Model>
3714bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003715{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003716 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003717
Mike Kelly0a879362019-07-29 16:56:31 +01003718 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3719 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3720
3721 if (!input0.IsValid() || !input1.IsValid())
3722 {
3723 return Fail("%s: Operation has invalid inputs", __func__);
3724 }
3725
3726 // The FuseActivation parameter is always the input index 2
3727 // and it should be optional
3728 ActivationFn activationFunction;
3729 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3730 {
3731 return Fail("%s: Operation has invalid inputs", __func__);
3732 }
3733
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003734 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003735 if (!output)
3736 {
3737 return Fail("%s: Could not read output 0", __func__);
3738 }
3739
3740 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003741
3742 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003743 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3744 {
3745 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3746 IsSubtractionSupported,
3747 data.m_Backends,
3748 isSupported,
3749 input0.GetTensorInfo(),
3750 input1.GetTensorInfo(),
3751 outputInfo);
3752 };
3753
3754     if (IsDynamicTensor(outputInfo))
3755 {
3756 isSupported = AreDynamicTensorsSupported();
3757 }
3758 else
3759 {
3760 validateFunc(outputInfo, isSupported);
3761 }
3762
Mike Kelly0a879362019-07-29 16:56:31 +01003763 if (!isSupported)
3764 {
3765 return false;
3766 }
3767
3768     armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
    if (!startLayer)
    {
        return Fail("%s: Could not add the SubtractionLayer", __func__);
    }
Mike Kelly0a879362019-07-29 16:56:31 +01003769
Kevin Mayfcf2a152020-09-08 16:06:32 +01003770 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3771 if (!isReshapeSupported)
Mike Kelly0a879362019-07-29 16:56:31 +01003772 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003773 return false;
Mike Kelly0a879362019-07-29 16:56:31 +01003774 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003775 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3776 data, nullptr, validateFunc, activationFunction);
Mike Kelly0a879362019-07-29 16:56:31 +01003777}
3778
Finn Williams23b87b32019-07-30 11:44:05 +01003779template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003780 typename HalOperation = typename HalPolicy::Operation,
3781 typename HalModel = typename HalPolicy::Model>
3782bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003783{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003784 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003785
3786 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3787 if (!input.IsValid())
3788 {
3789 return Fail("%s: Operation has invalid inputs", __func__);
3790 }
3791
3792 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3793 unsigned int rank = inputInfo.GetNumDimensions();
3794 if (rank > 4)
3795 {
3796         return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3797 }
3798
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003799 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003800 if (!output)
3801 {
3802 return Fail("%s: Could not read output 0", __func__);
3803 }
Sadik Armagan346e8112020-09-02 09:55:14 +01003804
3805 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
Mike Kelly46272802019-08-14 17:00:48 +01003806 {
3807 return Fail("%s: Dynamic output tensors are not supported", __func__);
3808 }
3809
3810 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3811 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003812 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003813
Mike Kelly46272802019-08-14 17:00:48 +01003814 std::vector<int32_t> axis;
3815 if (!axisOperand)
3816 {
Mike Kelly1b46d132021-11-03 11:12:45 +00003817 for (unsigned int i = 0; i < rank; ++i)
3818 {
3819             axis.push_back(static_cast<int32_t>(i));
3820 }
Mike Kelly46272802019-08-14 17:00:48 +01003821 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003822 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003823 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003824 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003825 }
3826
3827 std::vector<uint32_t> outputDims;
3828 for (unsigned int i = 0; i < rank; i++)
3829 {
3830 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3831 auto currentDimension = inputInfo.GetShape()[i];
3832 if (skipSqueeze || currentDimension != 1)
3833 {
3834 outputDims.push_back(currentDimension);
3835 }
3836 }
3837
3838 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3839
3840 armnn::TensorInfo outputInfo = inputInfo;
3841 outputInfo.SetShape(outShape);
3842
3843 armnn::ReshapeDescriptor reshapeDesc;
3844 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3845
3846 bool isSupported = false;
3847 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3848 IsReshapeSupported,
3849 data.m_Backends,
3850 isSupported,
3851 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003852 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003853 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003854
Mike Kelly46272802019-08-14 17:00:48 +01003855 if (!isSupported)
3856 {
3857 return false;
3858 }
3859
3860 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
Mike Kellye2d611e2021-10-14 12:35:58 +01003861 if (!layer)
3862 {
3863 return Fail("%s: Could not add the ReshapeLayer", __func__);
3864 }
Mike Kelly46272802019-08-14 17:00:48 +01003865 input.Connect(layer->GetInputSlot(0));
3866
3867 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3868}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
    const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The lengths of the beginOperand, endOperand and stridesOperand must all equal rank(input)
    auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has an invalid input operand", __func__);
    }

    // Stride cannot have the value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be a non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

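    // The masks below follow TensorFlow-style strided-slice semantics: if bit i of m_BeginMask
    // (m_EndMask) is set, begin[i] (end[i]) is ignored and the fullest possible range is used for
    // dimension i; if bit i of m_ShrinkAxisMask is set, dimension i is removed from the output.
    // For example (illustrative values): slicing a [3, 4] input with begin = [1, 0], end = [2, 4],
    // stride = [1, 1] and shrinkAxisMask = 1 selects row 1 and yields an output of shape [4].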
    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

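    // For a dynamic output tensor no concrete shape is available yet, so backend support cannot be
    // checked here; validateFunc is instead handed to SetupAndTrackLayerOutputSlot below, which
    // runs it once the output shape has been inferred.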
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsStridedSliceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    // Check if the slice can fit in an inferred output
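    // For example (illustrative values): with the shrink bit set for an axis, begin = [1] and
    // end = [2] differ by 1 and are accepted, whereas begin = [0] and end = [3] are rejected,
    // since a shrunk axis can hold only a single element.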
    armnn::TensorShape inputShape = inputInfo.GetShape();
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
    {
        int stride = descriptor.m_Stride[i];

        if (descriptor.m_ShrinkAxisMask & (1 << i))
        {
            // If the difference between the start point and the end point of the slice on an axis
            // being shrunk is greater than 1, fail: the output will not be large enough to hold the slice
            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
            {
                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
            }

            if (stride < 0)
            {
                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
            }
        }
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the StridedSliceLayer", __func__);
    }
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: The permutation tensor is an optional parameter to TRANSPOSE, therefore we do not want
    // to generate a failure if the operand index is out of bounds.
    const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
    {
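        // With no permutation given, default to reversing the dimension order, e.g. for a rank 4
        // input the loop below produces the permutation [3, 2, 1, 0].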
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = armnn::numeric_cast<int>(i - 1);
        }
    }
    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    armnn::TransposeDescriptor transposeDesc;
    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   transposeDesc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the TransposeLayer", __func__);
    }
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertBatchToSpaceNd(const HalOperation& operation,
                           const HalModel& model,
                           ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

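    // BatchToSpaceNd rearranges batch elements into spatial blocks. For example (illustrative
    // shapes): an NHWC input of [4, 1, 1, 1] with block shape [2, 2] yields an output of
    // [1, 2, 2, 1], as the batch is divided by 2*2 while height and width are each multiplied by 2.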
    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
    }

    // Set crops to {0, 0}, {0, 0}, as crops are not supported by the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    if (!layer)
    {
        return Fail("%s: Could not add the BatchToSpaceNdLayer", __func__);
    }
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* paddingsOperand   = GetInputOperand<HalPolicy>(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%u]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
    }

    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%u, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
    }

    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
                                 static_cast<unsigned int>(paddingAfterInput));
    }

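    // SpaceToBatchNd is the inverse of BatchToSpaceNd. For example (illustrative shapes): an NHWC
    // input of [1, 2, 2, 1] with block shape [2, 2] and zero padding yields an output of
    // [4, 1, 1, 1], moving each 2x2 spatial block into its own batch element.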
    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsSpaceToBatchNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: Could not add the SpaceToBatchNdLayer", __func__);
    }
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}

} // namespace armnn_driver