blob: 830502df519a311ba62e3ab3c6258318a2604260 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
#ifdef ARMNN_ANDROID_R
// On Android R builds, pull OperandType out of the android::nn::hal namespace so the
// unqualified name used throughout this driver resolves consistently across OS versions.
using OperandType = android::nn::hal::OperandType;
#endif
41
/// State shared across the conversion of a single AndroidNN model into an armnn::INetwork.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    // Backends to query for layer support; FORWARD_LAYER_SUPPORT_FUNC tries them in this order
    // and stops at the first one that accepts the layer.
    const std::vector<armnn::BackendId> m_Backends;
    // The network being built up during conversion.
    armnn::INetworkPtr m_Network;
    // Output slots producing each operand's value — presumably indexed by operand index;
    // populated elsewhere in the driver (confirm at the point of use).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data (see GetOperandValueReadOnlyAddress);
    // must stay alive for the duration of the conversion.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    // Set when an input with dynamic dimensions has been encountered — TODO confirm where this is written.
    bool m_DynamicInputsEncountered;
};
56
/// Wraps an armnn::IOutputSlot (plus the TensorInfo describing its data) so it can be
/// used as the source when wiring up a layer's input. Method definitions live elsewhere.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// True when this handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    /// Disconnects the wrapped output slot from the given input slot.
    void Disconnect(armnn::IInputSlot& inputSlot);

    /// TensorInfo captured at construction time for the wrapped slot's data.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
76
/// Pins the data of a constant tensor for the duration of the conversion, owning a
/// swizzled copy of the data when a permutation had to be applied.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data — NOTE(review): when non-trivial this presumably
    //                   triggers the swizzled copy described at m_SwizzledTensorData; confirm in the definition.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (the pin may own the swizzled buffer), but movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
111
112} // namespace armnn_driver
113
114///
115/// Utility functions
116///
117
118namespace
119{
120
121using namespace armnn_driver;
122using namespace android::nn;
123
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string (forwarded to ALOGD)
// @param args      arguments for the format string
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
132
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Queries each backend in 'backends' in order, assigning the result of
// ILayerSupport::func(...) to 'supported'; iteration stops at the first backend that
// accepts the layer. Unregistered backends and per-backend rejection reasons are
// logged via ALOGD. An InvalidArgumentException thrown during the check is re-thrown
// with extra context. ('supported' must be an lvalue bool declared by the caller.)
// NOTE: comments cannot appear inside the macro body — '//' would swallow the
// line-continuation backslashes — so all documentation lives up here.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100176
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000177template<typename HalOperand>
178armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100179{
180 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
181}
182
Matthew Bentham912b3622019-05-03 15:49:14 +0100183inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100184{
Matthew Bentham912b3622019-05-03 15:49:14 +0100185 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
186 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
187 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100188}
189
Kevin May42477c12020-03-26 13:34:14 +0000190#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kellyb5fdf382019-06-11 16:35:25 +0100191
Keith Davis71006492020-01-06 17:44:16 +0000192// Support within the 1.2 driver for specific tensor data types
Mike Kellyb5fdf382019-06-11 16:35:25 +0100193inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
194{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000195 return type == V1_2::OperandType::BOOL ||
Sadik Armagan793a70c2020-03-19 13:54:04 +0000196 type == V1_2::OperandType::TENSOR_BOOL8 ||
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000197 type == V1_2::OperandType::TENSOR_FLOAT16 ||
198 type == V1_2::OperandType::TENSOR_FLOAT32 ||
199 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
Keith Davis71006492020-01-06 17:44:16 +0000200 type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000201 type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
202 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
Mike Kellyb5fdf382019-06-11 16:35:25 +0100203 type == V1_2::OperandType::TENSOR_INT32;
204}
205
206#endif
207
Kevin May42477c12020-03-26 13:34:14 +0000208#ifdef ARMNN_ANDROID_NN_V1_3
209
210// Support within the 1.3 driver for specific tensor data types
211inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
212{
213 return type == V1_3::OperandType::BOOL ||
Sadik Armagan51ba2c62020-03-31 15:36:25 +0100214 type == V1_3::OperandType::TENSOR_BOOL8 ||
Kevin May42477c12020-03-26 13:34:14 +0000215 type == V1_3::OperandType::TENSOR_FLOAT16 ||
216 type == V1_3::OperandType::TENSOR_FLOAT32 ||
217 type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
Sadik Armagan51ba2c62020-03-31 15:36:25 +0100218 type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
Kevin May42477c12020-03-26 13:34:14 +0000219 type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
220 type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
221 type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
222 type == V1_3::OperandType::TENSOR_INT32;
223}
224
225#endif
226
// Overload for V1_0 operands: unconditionally false (no BOOL check is performed for this HAL version).
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// Overload for V1_0 operands: a V1_0 operand never comes from a 1.2-or-later model.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
236
Kevin May42477c12020-03-26 13:34:14 +0000237#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kellyb5fdf382019-06-11 16:35:25 +0100238
// True when the V1_2 operand carries a scalar BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2-or-later operand (always true for this V1_2 overload).
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}
249
250#endif
251
252#ifdef ARMNN_ANDROID_NN_V1_3
253
// True when the V1_3 operand carries a scalar BOOL.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2-or-later operand (always true for this V1_3 overload).
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}
264
Mike Kellyb5fdf382019-06-11 16:35:25 +0100265#endif
266
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100267template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000268armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
269 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100270 armnn::TensorInfo reshapeInfo)
271{
272 armnn::ReshapeDescriptor reshapeDescriptor;
273 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
274
275 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100276 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100277
278 // Attach the input layer to the reshape layer
279 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
280 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
281
282 return *reshapeLayer;
283}
284
/// Connects two inputs (which may differ in rank) to the first two input slots of
/// 'startLayer', inserting a reshape in front of the lower-rank input so both have the
/// same number of dimensions (degenerate leading 1-dimensions are added).
/// @return false if the required reshape is not supported by any backend, true otherwise.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller shape into the tail of a ones-filled shape of the larger rank
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
377
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000378void CalcPadding(uint32_t input,
379 uint32_t kernel,
380 uint32_t stride,
381 uint32_t& outPadHead,
382 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100383 android::nn::PaddingScheme scheme)
384{
385 int32_t padHead;
386 int32_t padTail;
387 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
388 outPadHead = boost::numeric_cast<uint32_t>(padHead);
389 outPadTail = boost::numeric_cast<uint32_t>(padTail);
390}
391
Kevin May42477c12020-03-26 13:34:14 +0000392#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kelly86b36d42019-07-12 16:39:33 +0100393
394void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
395 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
396{
397 int32_t padHead;
398 int32_t padTail;
399 calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
400 outPadHead = boost::numeric_cast<uint32_t>(padHead);
401 outPadTail = boost::numeric_cast<uint32_t>(padTail);
402}
403
// Computes explicit padding for a transposed convolution from the desired output size,
// kernel, stride and padding scheme; thin wrapper over the NN SDK helper (note the
// helper takes stride before kernel).
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}
409
Mike Kelly86b36d42019-07-12 16:39:33 +0100410#endif
411
Matthew Bentham912b3622019-05-03 15:49:14 +0100412Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100413{
414 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100415 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100416 shape.dimensions = operand.dimensions;
417 shape.scale = operand.scale;
418 shape.offset = operand.zeroPoint;
419 return shape;
420}
421
Kevin May42477c12020-03-26 13:34:14 +0000422#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kelly46272802019-08-14 17:00:48 +0100423
424Shape GetOperandShape(const V1_2::Operand& operand)
425{
426 Shape shape;
427 shape.type = OperandType(operand.type);
428 shape.dimensions = operand.dimensions;
429 shape.scale = operand.scale;
430 shape.offset = operand.zeroPoint;
431 return shape;
432}
433
434#endif
435
Kevin May42477c12020-03-26 13:34:14 +0000436#ifdef ARMNN_ANDROID_NN_V1_3
437
438Shape GetOperandShape(const V1_3::Operand& operand)
439{
440 Shape shape;
441 shape.type = OperandType(operand.type);
442 shape.dimensions = operand.dimensions;
443 shape.scale = operand.scale;
444 shape.offset = operand.zeroPoint;
445 return shape;
446}
447
448#endif
449
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
// @param biasInfo   [in/out] bias tensor info whose quantization params may be rewritten
// @param weightInfo weight tensor info (per-tensor or per-axis quantized)
// @param inputInfo  input tensor info providing the input quantization scale
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only rewrite the scale when it is within 1% of the expected value; larger
            // mismatches are left for ArmNN itself to reject.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
489
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Exchanges dimensions 1 and 2 of a 4D tensor (used for concat along dimension 2)
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (inverse rotations of each other — used as a permute/unpermute pair)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100497
498template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000499armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
500 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100501{
502 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000503 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100504
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100505 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100506
507 // Connect input to swizzle layer
508 input.Connect(layer->GetInputSlot(0));
509
510 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000511 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100512 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
513
514 return *layer;
515}
516
arovir01b0717b52018-09-05 17:03:25 +0100517bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
518 const armnn::TensorShape & outputShape,
519 uint32_t concatDim)
520{
521 // Validate the output shape is correct given the input shapes (which have just been validated)
522 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
523 if (outputShape.GetNumDimensions() != numDimensions)
524 {
525 return Fail("%s: Output shape has wrong number of dimensions", __func__);
526 }
527
528 unsigned int outputSizeAlongConcatenatedDimension = 0;
529 for (unsigned int i = 0; i < inputShapes.size(); i++)
530 {
531 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
532 }
533
534 for (unsigned int i = 0; i < numDimensions; ++i)
535 {
536 if (i == concatDim)
537 {
538 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
539 {
540 return Fail(
541 "%s: Invalid output shape for dimension %d (%d != %d)",
542 __func__,
543 i,
544 outputShape[i],
545 outputSizeAlongConcatenatedDimension);
546 }
547 }
548 else
549 {
550 if (outputShape[i] != inputShapes[0][i])
551 {
552 return Fail("%s: Invalid output shape", __func__);
553 }
554 }
555 }
556
557 return true;
558}
559
// True when the shape has fewer than 3 dimensions — callers presumably reshape such
// tensors up to 3D before concatenation (confirm at the call sites).
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
564
arovir01b0717b52018-09-05 17:03:25 +0100565void SwizzleInputs(armnn::INetwork& network,
566 std::vector<LayerInputHandle>& inputs,
567 std::vector<armnn::TensorShape>& inputShapes,
568 const armnn::PermutationVector& mapping)
569{
570 if (!mapping.IsEqual(IdentityPermutation4D))
571 {
572 size_t nInputs = inputs.size();
573 for (size_t i=0; i<nInputs; ++i)
574 {
575 // add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000576 armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
arovir01b0717b52018-09-05 17:03:25 +0100577 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
578 auto& outputInfo = outputSlot.GetTensorInfo();
579 // replace inputs with the swizzled ones
580 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
581 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
582 }
583 }
584}
585
/// Checks that every backend-supported transpose exists for each input under 'mapping',
/// then swizzles the inputs via SwizzleInputs. No-op (returning true) for the identity.
/// @return false as soon as one input's transpose is unsupported by all backends.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        // All transposes are supported: actually insert them
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
620
621
narpra01f176d5a2018-11-18 20:17:48 +0000622bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
623 int32_t & concatDimension,
624 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100625{
narpra01f176d5a2018-11-18 20:17:48 +0000626 bool needPermute = false;
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100627 ARMNN_ASSERT(numberOfDimensions >= 3);
arovir01b0717b52018-09-05 17:03:25 +0100628
629 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000630 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
631 // or along dimension 0 or 2 for a 3-D tensor.
632 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100633 {
narpra01f176d5a2018-11-18 20:17:48 +0000634 concatDimension = 1;
635 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
636 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100637 }
narpra01f176d5a2018-11-18 20:17:48 +0000638 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100639 {
narpra01f176d5a2018-11-18 20:17:48 +0000640 concatDimension = 0;
641 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
642 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100643 }
narpra01f176d5a2018-11-18 20:17:48 +0000644 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100645}
646
647} // anonymous namespace
648
649namespace armnn_driver
650{
651
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
662
663} // namespace armnn_driver
664
665///
666/// Utility templates
667///
668
669namespace armnn_driver
670{
671
672using namespace android::nn;
673
/// Looks up the operand backing input 'inputIndex' of 'operation'.
/// @param failOnIndexOutOfBounds when true, an out-of-range index is logged as a failure
///        (it is always reported via the nullptr return either way).
/// @return pointer to the operand within the model, or nullptr if the index is invalid.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %i expects an int: size() is a size_t, so cast explicitly to avoid
            // undefined varargs behaviour on LP64 targets.
            Fail("%s: invalid input index: %i out of %i",
                 __func__, inputIndex, static_cast<int>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
696
/// Looks up the operand backing output 'outputIndex' of 'operation'.
/// @return pointer to the operand within the model, or nullptr (after logging) if the
///         index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %i expects an int: size() is a size_t, so cast explicitly to avoid
        // undefined varargs behaviour on LP64 targets.
        Fail("%s: invalid output index: %i out of %i",
             __func__, outputIndex, static_cast<int>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
716
/// Returns a read-only pointer to the constant data backing 'operand', or nullptr.
/// @param operand  operand whose value is requested
/// @param model    model providing operandValues for CONSTANT_COPY operands
/// @param data     conversion state providing memory pools for CONSTANT_REFERENCE operands
/// @param optional when true, a NO_VALUE lifetime returns nullptr without logging a failure
/// @return pointer into model/pool memory, or nullptr for NO_VALUE (optional) and
///         unsupported/invalid lifetimes (logged).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A missing non-optional value is treated as an error below
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
763
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100764template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100765 typename HalOperation = typename HalPolicy::Operation,
766 typename HalModel = typename HalPolicy::Model,
767 typename HalOperandType = typename HalPolicy::OperandType>
768bool GetOperandType(const HalOperation& operation,
769 uint32_t inputIndex,
770 const HalModel& model,
771 HalOperandType& type)
772{
773 using HalOperand = typename HalPolicy::Operand;
774
775 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
776 if (!operand)
777 {
778 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
779 }
780
781 type = operand->type;
782 return true;
783}
784
/// Returns true if the operand's value is statically known at conversion time.
///
/// NOTE: NO_VALUE (an omitted optional input) also counts as "constant" here, so
/// that absent optional inputs remain eligible for const-tensor-pin conversion.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
797
/// Converts a constant HAL operand into a ConstTensorPin (a non-owning handle to
/// the operand's data plus its ArmNN TensorInfo).
///
/// @param operand             The operand to convert; must be a tensor type and (unless
///                            optional) have a constant lifetime.
/// @param model               Model owning embedded operand values.
/// @param data                Conversion state (memory pools for CONSTANT_REFERENCE data).
/// @param dimensionMappings   Permutation applied to the tensor data (and to the
///                            per-axis quantization dimension, if any) — e.g. to convert
///                            Android's depthwise-convolution kernel layout to ArmNN's.
/// @param overrideTensorShape If non-null, replaces the shape derived from the operand.
/// @param optional            If true, an absent value yields a valid-but-empty pin
///                            (marked optional) instead of a failure.
/// @return A valid pin on success; an invalid pin (after logging) on failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // A non-optional operand must have a constant lifetime to be convertible here.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    // Remap the quantization axis through the same permutation the data will undergo.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    // NOTE: the shape override is applied AFTER the quantization-dim remap above.
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
846
847template<typename HalPolicy,
848 typename HalOperation = typename HalPolicy::Operation,
849 typename HalModel = typename HalPolicy::Model>
850ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
851 uint32_t inputIndex,
852 const HalModel& model,
853 const ConversionData& data,
854 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
855 const armnn::TensorShape* overrideTensorShape = nullptr,
856 bool optional = false)
857{
858 using HalOperand = typename HalPolicy::Operand;
859
860 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
861 if (!operand)
862 {
863 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
864 return ConstTensorPin();
865 }
866 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
867 model,
868 data,
869 dimensionMappings,
870 overrideTensorShape,
871 optional);
872}
873
874template<typename HalPolicy,
875 typename OutputType,
876 typename HalOperandType = typename HalPolicy::OperandType,
877 typename HalOperation = typename HalPolicy::Operation,
878 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100879bool GetInputScalar(const HalOperation& operation,
880 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100881 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100882 OutputType& outValue,
883 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100884 const ConversionData& data,
885 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100886{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100887 using HalOperand = typename HalPolicy::Operand;
888
889 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100890 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100891 {
892 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
893 }
894
Sadik Armagan813f2302020-05-19 14:10:30 +0100895 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100896 {
897 return Fail("%s: unexpected operand type: %s (should be %s)",
898 __func__, toString(operand->type).c_str(), toString(type).c_str());
899 }
900
Sadik Armagan813f2302020-05-19 14:10:30 +0100901 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100902 {
903 return Fail("%s: incorrect operand location length: %i (should be %i)",
904 __func__, operand->location.length, sizeof(OutputType));
905 }
906
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100907 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100908 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100909 {
910 return Fail("%s: failed to get address for operand", __func__);
911 }
912
Sadik Armagan813f2302020-05-19 14:10:30 +0100913 if(!optional)
914 {
915 outValue = *(static_cast<const OutputType*>(valueAddress));
916 }
917
arovir01b0717b52018-09-05 17:03:25 +0100918 return true;
919}
920
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100921template<typename HalPolicy,
922 typename HalOperation = typename HalPolicy::Operation,
923 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100924bool GetInputInt32(const HalOperation& operation,
925 uint32_t inputIndex,
926 int32_t& outValue,
927 const HalModel& model,
928 const ConversionData& data)
929{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100930 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100931}
932
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933template<typename HalPolicy,
934 typename HalOperation = typename HalPolicy::Operation,
935 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100936bool GetInputFloat32(const HalOperation& operation,
937 uint32_t inputIndex,
938 float& outValue,
939 const HalModel& model,
940 const ConversionData& data)
941{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100942 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100943}
944
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100945template<typename HalPolicy,
946 typename HalOperation = typename HalPolicy::Operation,
947 typename HalOperandType = typename HalPolicy::OperandType,
948 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100949bool GetInputActivationFunctionImpl(const HalOperation& operation,
950 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100951 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100952 ActivationFn& outActivationFunction,
953 const HalModel& model,
954 const ConversionData& data)
955{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100956 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100957 {
958 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
959 __func__,
960 toString(type).c_str(),
961 toString(OperandType::INT32).c_str(),
962 toString(OperandType::TENSOR_INT32).c_str());
963 }
964
965 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100966 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100967 {
968 return Fail("%s: failed to get activation input value", __func__);
969 }
970 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
971 return true;
972}
973
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100974template<typename HalPolicy,
975 typename HalOperation = typename HalPolicy::Operation,
976 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100977bool GetInputActivationFunction(const HalOperation& operation,
978 uint32_t inputIndex,
979 ActivationFn& outActivationFunction,
980 const HalModel& model,
981 const ConversionData& data)
982{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100983 return GetInputActivationFunctionImpl<HalPolicy>(operation,
984 inputIndex,
985 HalPolicy::OperandType::INT32,
986 outActivationFunction,
987 model,
988 data);
arovir01b0717b52018-09-05 17:03:25 +0100989}
990
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100991template<typename HalPolicy,
992 typename HalOperation = typename HalPolicy::Operation,
993 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100994bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
995 uint32_t inputIndex,
996 ActivationFn& outActivationFunction,
997 const HalModel& model,
998 const ConversionData& data)
999{
1000 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001001 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1002 inputIndex,
1003 HalPolicy::OperandType::INT32,
1004 outActivationFunction,
1005 model,
1006 data);
arovir01b0717b52018-09-05 17:03:25 +01001007}
1008
1009
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001010template<typename HalPolicy,
1011 typename HalOperation = typename HalPolicy::Operation,
1012 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001013bool GetOptionalInputActivation(const HalOperation& operation,
1014 uint32_t inputIndex,
1015 ActivationFn& activationFunction,
1016 const HalModel& model,
1017 const ConversionData& data)
1018{
1019 if (operation.inputs.size() <= inputIndex)
1020 {
1021 activationFunction = ActivationFn::kActivationNone;
1022 }
1023 else
1024 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001025 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001026 {
1027 return Fail("%s: Operation has invalid inputs", __func__);
1028 }
1029 }
1030 return true;
1031}
1032
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001033template<typename HalPolicy,
1034 typename ConvolutionDescriptor,
1035 typename HalOperation = typename HalPolicy::Operation,
1036 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001037bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1038 uint32_t dilationXIndex,
1039 ConvolutionDescriptor& descriptor,
1040 const HalModel& model,
1041 const ConversionData& data)
1042{
1043 bool success = true;
1044 if (operation.inputs.size() >= dilationXIndex + 2)
1045 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001046 success &= GetInputScalar<HalPolicy>(operation,
1047 dilationXIndex,
1048 HalPolicy::OperandType::INT32,
1049 descriptor.m_DilationX,
1050 model,
1051 data);
1052 success &= GetInputScalar<HalPolicy>(operation,
1053 dilationXIndex + 1,
1054 HalPolicy::OperandType::INT32,
1055 descriptor.m_DilationY,
1056 model,
1057 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001058 }
1059
1060 return success;
1061}
1062
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001063template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001064 typename HalOperation = typename HalPolicy::Operation,
1065 typename HalModel = typename HalPolicy::Model>
1066bool GetOptionalBool(const HalOperation& operation,
1067 uint32_t inputIndex,
1068 const HalModel& model,
1069 const ConversionData& data)
1070{
1071 using HalOperand = typename HalPolicy::Operand;
1072
1073 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1074 if (!operand)
1075 {
1076 return false;
1077 }
1078
1079 if (!IsBool(*operand))
1080 {
1081 return false;
1082 }
1083
1084 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1085 if (!valueAddress)
1086 {
1087 return false;
1088 }
1089
1090 if (*(static_cast<const bool*>(valueAddress)))
1091 {
1092 return true;
1093 }
1094 else
1095 {
1096 return false;
1097 }
1098}
1099
1100template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001101 typename HalOperand = typename HalPolicy::Operand,
1102 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001103bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001104 std::vector<int32_t>& outValues,
1105 const HalModel& model,
1106 const ConversionData& data)
1107{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001108 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001109 {
1110 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1111 }
1112
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001113 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001114 if (!startAddress)
1115 {
1116 return Fail("%s: failed to get operand address", __func__, operand.type);
1117 }
1118
1119 // Check number of bytes is sensible
1120 const uint32_t numBytes = operand.location.length;
1121 if (numBytes % sizeof(int32_t) != 0)
1122 {
1123 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1124 __func__, numBytes, sizeof(int32_t));
1125 }
1126
1127 outValues.resize(numBytes / sizeof(int32_t));
1128 memcpy(outValues.data(), startAddress, numBytes);
1129 return true;
1130}
1131
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001132template<typename HalPolicy,
1133 typename HalOperation = typename HalPolicy::Operation,
1134 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001135bool GetInputPaddingScheme(const HalOperation& operation,
1136 uint32_t inputIndex,
1137 PaddingScheme& outPaddingScheme,
1138 const HalModel& model,
1139 const ConversionData& data)
1140{
1141 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001142 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001143 {
1144 return Fail("%s: failed to get padding scheme input value", __func__);
1145 }
1146
1147 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1148 return true;
1149}
1150
/// Converts the operation input at inputIndex into a LayerInputHandle that a new
/// ArmNN layer can be connected to (HAL 1.0-1.2 variant).
///
/// Behaviour by operand lifetime:
///   - MODEL_INPUT:        first verifies at least one backend supports the input
///                         tensor, then (fallthrough) resolves the existing output slot.
///   - TEMPORARY_VARIABLE / MODEL_OUTPUT: resolves the ArmNN output slot already
///                         registered for the operand in data.m_OutputSlotForOperand.
///   - CONSTANT_COPY / CONSTANT_REFERENCE: materialises the value as a new ArmNN
///                         Constant layer (if a backend supports it) and returns its slot.
///   - anything else: logged failure, invalid handle.
///
/// Dynamic (unknown-shape) input tensors are rejected in this variant.
/// Returns an invalid LayerInputHandle (after logging) on any failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                                    data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1263
Kevin May42477c12020-03-26 13:34:14 +00001264
#ifdef ARMNN_ANDROID_NN_V1_3
/// Converts the operation input at inputIndex into a LayerInputHandle (HAL 1.3
/// variant). Unlike the pre-1.3 overload, dynamic input tensors are tolerated
/// when their shape has already been inferred by a previously-converted layer
/// ("type 1" dynamic tensors); fully unknown ("type 2") dynamic inputs fail.
///
/// BUGFIX: the dynamic-tensor bounds check used
///   'data.m_OutputSlotForOperand.size() >= operandIndex'
/// which admits operandIndex == size() and then indexes one element past the
/// end of the container. The check is corrected to require the index to be
/// strictly less than the size.
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            if (operandIndex < data.m_OutputSlotForOperand.size() && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                                    data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1391
/// Binds operation output operationOutputIndex to output slot layerOutputIndex of
/// the newly-created ArmNN layer, and records the slot in
/// data.m_OutputSlotForOperand so later operations can consume it.
///
/// @param operation            The HAL operation being converted.
/// @param operationOutputIndex Index into operation.outputs.
/// @param layer                The ArmNN layer whose output is being tracked.
/// @param layerOutputIndex     Index of the ArmNN output slot (may differ from
///                             operationOutputIndex for multi-output layers).
/// @param model                The model owning the operands.
/// @param data                 Conversion state; receives the slot mapping.
/// @param overrideOutputInfo   If non-null, used instead of the operand's own TensorInfo.
/// @param validateFunc         Optional backend-support check, re-run after dynamic
///                             shape inference (see below).
/// @return true on success; false if the operand/slot indices are invalid, a
///         dynamic shape cannot be inferred, or validation fails.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    // NOTE: the bounds check below uses operationOutputIndex, while the slot is
    // fetched with layerOutputIndex — presumably the two coincide for current
    // callers; verify if they ever diverge.
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    // Type one dynamic tensors require the previous layer's output shape for inference
    if (!layer.GetInputSlot(0).GetConnection() &&
        IsDynamicTensor(outputSlot.GetTensorInfo()))
    {
        return false;
    }

    bool isSupported = false;
    if (validateFunc &&
        layer.GetInputSlot(0).GetConnection() &&
        IsDynamicTensor(outputSlot.GetTensorInfo()))
    {
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if(!isSupported)
        {
            // Validation failed: detach the layer from the graph so it is not
            // left half-connected, then report failure.
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }

            return false;
        }
    }

    // Record the slot so downstream operations can resolve this operand.
    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    return true;
}
1456
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001457template<typename HalPolicy,
1458 typename HalOperation = typename HalPolicy::Operation,
1459 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001460armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1461 uint32_t inputIndex,
1462 const HalModel& model,
1463 ConversionData& data)
1464{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001465 using HalOperand = typename HalPolicy::Operand;
1466
1467 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001468 if (!operand)
1469 {
1470 return armnn::DataLayout::NHWC;
1471 }
1472
1473 if (!IsBool(*operand))
1474 {
1475 return armnn::DataLayout::NHWC;
1476 }
1477
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001478 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001479 if (!valueAddress)
1480 {
1481 return armnn::DataLayout::NHWC;
1482 }
1483
1484 if (*(static_cast<const bool*>(valueAddress)))
1485 {
1486 return armnn::DataLayout::NCHW;
1487 }
1488 else
1489 {
1490 return armnn::DataLayout::NHWC;
1491 }
1492}
1493
// Convenience overload of SetupAndTrackLayerOutputSlot: registers the output slot of
// 'layer' as the operation's output at 'outputIndex', using the same index for both
// the operation output and the layer's own output slot.
//
// @param operation          The HAL operation whose output is being tracked.
// @param outputIndex        Index used both as the operation output index and the
//                           layer output slot index.
// @param layer              The ArmNN layer producing the output.
// @param model              The HAL model containing the operand definitions.
// @param data               Conversion state (network, backends, tracked slots).
// @param overrideOutputInfo Optional TensorInfo to use instead of the operand's own.
// @param validateFunc       Optional deferred support-check callback (used for
//                           dynamic output tensors).
// @return true on success, false otherwise (delegated to the full overload).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
{
    // 'outputIndex' is deliberately passed twice: once as the operation output index
    // and once as the layer output slot index.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   overrideOutputInfo,
                                                   validateFunc);
}
1514
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001515template<typename HalPolicy,
1516 typename HalOperation = typename HalPolicy::Operation,
1517 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001518bool ConvertToActivation(const HalOperation& operation,
1519 const char* operationName,
1520 const armnn::ActivationDescriptor& activationDesc,
1521 const HalModel& model,
1522 ConversionData& data)
1523{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001524 using HalOperand = typename HalPolicy::Operand;
1525
1526 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001527 if (!input.IsValid())
1528 {
1529 return Fail("%s: Input 0 is invalid", operationName);
1530 }
1531
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001532 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001533 if (!outputOperand)
1534 {
1535 return false;
1536 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001537
1538 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001539
1540 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001541
1542 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1543 {
1544 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1545 IsActivationSupported,
1546 data.m_Backends,
1547 isSupported,
1548 input.GetTensorInfo(),
1549 outInfo,
1550 activationDesc);
1551 };
1552
1553 if(IsDynamicTensor(outInfo))
1554 {
1555 isSupported = AreDynamicTensorsSupported();
1556 }
1557 else
1558 {
1559 validateFunc(outInfo, isSupported);
1560 }
1561
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001562 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001563 {
1564 return false;
1565 }
1566
1567 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01001568 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001569 input.Connect(layer->GetInputSlot(0));
1570
Finn Williamsa4983ce2020-07-23 12:55:12 +01001571 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001572}
1573
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001574template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001575 typename HalOperation = typename HalPolicy::Operation,
1576 typename HalModel = typename HalPolicy::Model>
1577bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1578{
1579 armnn::ActivationDescriptor desc;
1580 desc.m_Function = armnn::ActivationFunction::ReLu;
1581
1582 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1583}
1584
1585template<typename HalPolicy,
1586 typename HalOperation = typename HalPolicy::Operation,
1587 typename HalModel = typename HalPolicy::Model>
1588bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1589{
1590 armnn::ActivationDescriptor desc;
1591 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1592 desc.m_A = 1.0f;
1593 desc.m_B = -1.0f;
1594
1595 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1596}
1597
1598template<typename HalPolicy,
1599 typename HalOperation = typename HalPolicy::Operation,
1600 typename HalModel = typename HalPolicy::Model>
1601bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1602{
1603 armnn::ActivationDescriptor desc;
1604 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1605 desc.m_A = 6.0f;
1606
1607 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1608}
1609
1610template<typename HalPolicy,
1611 typename HalOperation = typename HalPolicy::Operation,
1612 typename HalModel = typename HalPolicy::Model>
1613bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1614{
1615 armnn::ActivationDescriptor desc;
1616 desc.m_Function = armnn::ActivationFunction::TanH;
1617 desc.m_A = 1.0f; // android nn does not support tanH parameters
1618 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1619
1620 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1621}
1622
1623template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001624 typename HalOperation = typename HalPolicy::Operation,
1625 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001626bool ConvertPaddings(const HalOperation& operation,
1627 const HalModel& model,
1628 ConversionData& data,
1629 unsigned int rank,
1630 armnn::PadDescriptor& padDescriptor)
1631{
1632 using HalOperand = typename HalPolicy::Operand;
1633
1634 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1635 if (!paddingsOperand)
1636 {
1637 return Fail("%s: Could not read paddings operand", __func__);
1638 }
1639
1640 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1641 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1642 {
1643 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1644 }
1645
1646 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001647 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1648 {
1649 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1650 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001651
1652 // add padding for each dimension of input tensor.
1653 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1654 {
1655 int paddingBeforeInput = paddings[i];
1656 int paddingAfterInput = paddings[i + 1];
1657
1658 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1659 {
1660 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1661 }
1662
1663 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1664 }
1665
1666 return true;
1667}
1668
1669template<typename HalPolicy,
1670 typename HalOperation = typename HalPolicy::Operation,
1671 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001672bool ConvertPooling2d(const HalOperation& operation,
1673 const char* operationName,
1674 armnn::PoolingAlgorithm poolType,
1675 const HalModel& model,
1676 ConversionData& data)
1677{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001678 using HalOperand = typename HalPolicy::Operand;
1679 using HalOperandType = typename HalPolicy::OperandType;
1680
1681 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001682 if (!input.IsValid())
1683 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001684 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001685 }
1686
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001687 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001688 if (!output)
1689 {
1690 return Fail("%s: Could not read output 0", __func__);
1691 }
1692
1693 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1694 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1695
arovir01b0717b52018-09-05 17:03:25 +01001696 armnn::Pooling2dDescriptor desc;
1697 desc.m_PoolType = poolType;
1698 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001699 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001700
1701 ActivationFn activation;
1702
Sadik Armagan15d63e22019-07-26 16:59:35 +01001703 auto inputSize = operation.inputs.size();
1704
1705 if (inputSize >= 10)
1706 {
1707 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1708 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1709 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1710 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1711 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1712 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1713 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1714 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1715 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1716 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1717 {
1718 return Fail("%s: Operation has invalid inputs", operationName);
1719 }
1720
Kevin May42477c12020-03-26 13:34:14 +00001721 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001722 {
1723 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1724 }
1725 }
1726 else
arovir01b0717b52018-09-05 17:03:25 +01001727 {
1728 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1729 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001730 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1731 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1732 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1733 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1734 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1735 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001736 {
1737 return Fail("%s: Operation has invalid inputs", operationName);
1738 }
1739
Kevin May42477c12020-03-26 13:34:14 +00001740 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001741 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001742 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001743 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001744
1745 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1746 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1747 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1748
1749 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1750 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001751 }
1752
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001753 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001754
1755 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1756 {
1757 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1758 IsPooling2dSupported,
1759 data.m_Backends,
1760 isSupported,
1761 inputInfo,
1762 outputInfo,
1763 desc);
1764
1765 };
1766
1767 if(IsDynamicTensor(outputInfo))
1768 {
1769 isSupported = AreDynamicTensorsSupported();
1770 }
1771 else
1772 {
1773 validateFunc(outputInfo, isSupported);
1774 }
1775
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001776 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001777 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001778 return false;
arovir01b0717b52018-09-05 17:03:25 +01001779 }
arovir01b0717b52018-09-05 17:03:25 +01001780
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001781 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1782 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001783 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001784 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001785 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001786
1787 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
1788 if (!endLayer)
arovir01b0717b52018-09-05 17:03:25 +01001789 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001790 return Fail("%s: ProcessActivation failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001791 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001792
1793 input.Connect(pooling2dLayer->GetInputSlot(0));
1794
Finn Williamsa4983ce2020-07-23 12:55:12 +01001795 if (!isSupported)
1796 {
1797 return false;
1798 }
1799
1800 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001801}
1802
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001803template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001804 typename HalOperation = typename HalPolicy::Operation,
1805 typename HalModel = typename HalPolicy::Model>
1806bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001807{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001808 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001809
1810 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1811 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1812
1813 if (!input0.IsValid() || !input1.IsValid())
1814 {
1815 return Fail("%s: Operation has invalid inputs", __func__);
1816 }
1817
1818 // The FuseActivation parameter is always the input index 2
1819 // and it should be optional
1820 ActivationFn activationFunction;
1821 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1822 {
1823 return Fail("%s: Operation has invalid inputs", __func__);
1824 }
1825
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001826 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001827 if (!outputOperand)
1828 {
1829 return false;
1830 }
1831
1832 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1833 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1834
1835 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001836
1837 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001838 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1839 {
1840 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1841 IsAdditionSupported,
1842 data.m_Backends,
1843 isSupported,
1844 inputInfo0,
1845 inputInfo1,
1846 outputInfo);
1847 };
1848
1849 if(!IsDynamicTensor(outputInfo))
1850 {
1851 validateFunc(outputInfo, isSupported);
1852 }
1853 else
1854 {
1855 isSupported = AreDynamicTensorsSupported();
1856 }
1857
Mike Kelly46272802019-08-14 17:00:48 +01001858 if (!isSupported)
1859 {
1860 return false;
1861 }
1862
1863 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1864 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1865
1866 if (endLayer != nullptr)
1867 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001868 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001869 if (!isReshapeSupported)
1870 {
1871 return false;
1872 }
1873
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001874 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01001875 }
1876 else
1877 {
1878 return Fail("%s: ProcessActivation failed", __func__);
1879 }
1880}
1881
1882template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001883 typename HalOperation = typename HalPolicy::Operation,
1884 typename HalModel = typename HalPolicy::Model>
1885bool ConvertArgMinMax(const HalOperation& operation,
1886 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001887 ConversionData& data,
1888 armnn::ArgMinMaxFunction argMinMaxFunction)
1889{
1890 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1891
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001892 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001893 using HalOperandType = typename HalPolicy::OperandType;
1894
1895 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1896
1897 if (!input0.IsValid())
1898 {
1899 return Fail("%s: Operation has invalid inputs", __func__);
1900 }
1901
1902 int32_t axis;
1903 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1904 {
1905 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1906 }
1907
1908 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1909 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1910
1911 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1912 {
1913 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1914 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1915 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1916 return Fail("%s: Axis must be in range [-n, n)", __func__);
1917 }
1918
1919 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1920 if (!output)
1921 {
1922 return Fail("%s: Could not read output 0", __func__);
1923 }
1924
1925 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1926
1927 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001928
1929 armnn::ArgMinMaxDescriptor descriptor;
1930 descriptor.m_Function = argMinMaxFunction;
1931 descriptor.m_Axis = axis;
1932
1933 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001934
1935 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1936 {
1937 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1938 IsArgMinMaxSupported,
1939 data.m_Backends,
1940 isSupported,
1941 inputInfo0,
1942 outputInfo,
1943 descriptor);
1944 };
1945
1946 if(IsDynamicTensor(outputInfo))
1947 {
1948 isSupported = AreDynamicTensorsSupported();
1949 }
1950 else
1951 {
1952 validateFunc(outputInfo, isSupported);
1953 }
1954
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001955 if (!isSupported)
1956 {
1957 return false;
1958 }
1959
1960 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1961 assert(layer != nullptr);
1962
1963 input0.Connect(layer->GetInputSlot(0));
1964
Finn Williamsa4983ce2020-07-23 12:55:12 +01001965 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001966}
1967
// Converts an ANEURALNETWORKS_CONCATENATION operation: N tensor inputs followed
// by a scalar concatenation axis, producing one output. The conversion pipeline
// is order-sensitive:
//   1. read and normalise the (possibly negative) concat axis,
//   2. reshape any 1-D/2-D inputs up to 3-D (RequiresReshape),
//   3. transpose inputs/output if the axis needs swizzling to a supported one,
//   4. add the concat layer,
//   5. undo the transpose (deswizzle) and the reshape on the output, in that order.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // Working copies: both the shape and the info are mutated below when
    // reshapes/permutations are introduced.
    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether inputs were padded up to 3-D so the inverse reshape can be
    // applied to the output at the end.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // Verify the backend can perform the expanding reshape before adding it.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the support check.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Invert the earlier swizzle so the final output has the caller's layout.
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
                                                                     layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // Verify the backend can perform the collapsing reshape before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    // NOTE(review): unlike the other converters above, no validateFunc is passed
    // here, so dynamic output tensors are presumably not supported for concat at
    // this point - confirm against callers before relying on it.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2257
/// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Handles both HAL signatures of CONV_2D:
///   - 10 inputs: explicit padding (left/right/top/bottom), strides, activation
///   -  7 inputs: implicit padding scheme, strides, activation
/// Weights (input 1) and bias (input 2) must be constant operands.
/// Returns false (via Fail) if the operation cannot be converted.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Rescale the bias quantization scale to match weights-scale * input-scale
    // (small float drift between them is tolerated and corrected here).
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding signature: pads 3-6, strides 7-8, fused activation 9.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding signature: padding scheme 3, strides 4-5, fused activation 6.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel size from the weights shape: dims [1]=H, [2]=W
        // (weights are assumed OHWI here -- TODO confirm against the HAL spec).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        // Derive explicit padding values from the implicit scheme.
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    // Dynamic output tensors are validated later (via validateFunc passed to
    // SetupAndTrackLayerOutputSlot), once the shape has been inferred.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
}
2385
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002386template<typename HalPolicy,
2387 typename HalOperation = typename HalPolicy::Operation,
2388 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002389bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2390{
2391 using HalOperand = typename HalPolicy::Operand;
2392 using HalOperandType = typename HalPolicy::OperandType;
2393
2394 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2395 if (!input.IsValid() )
2396 {
2397 return Fail("%s: Operation has invalid inputs", __func__);
2398 }
2399
2400 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2401 unsigned int rank = inputInfo.GetNumDimensions();
2402 if (rank != 4)
2403 {
2404 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2405 }
2406
2407 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2408 if (!output)
2409 {
2410 return Fail("%s: Could not read output 0", __func__);
2411 }
2412
2413 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002414
2415 armnn::DepthToSpaceDescriptor descriptor;
2416
2417 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2418 if (descriptor.m_BlockSize <= 1)
2419 {
2420 return Fail("%s: Block size must be at least 1 in all dimensions");
2421 }
2422
2423 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002424 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002425 {
2426 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2427 }
2428
2429 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002430 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2431 {
2432 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2433 IsDepthToSpaceSupported,
2434 data.m_Backends,
2435 isSupported,
2436 inputInfo,
2437 outputInfo,
2438 descriptor);
2439 };
2440
2441 if(!IsDynamicTensor(outputInfo))
2442 {
2443 validateFunc(outputInfo, isSupported);
2444 }
2445 else
2446 {
2447 isSupported = AreDynamicTensorsSupported();
2448 }
2449
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002450 if (!isSupported)
2451 {
2452 return false;
2453 }
2454
2455 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2456 assert(layer != nullptr);
2457 input.Connect(layer->GetInputSlot(0));
2458
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002459 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002460}
2461
/// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Handles both HAL signatures:
///   - 11 inputs: explicit padding (left/right/top/bottom), strides,
///     depth multiplier (input 9, implied by the weights shape here),
///     fused activation 10
///   -  8 inputs: implicit padding scheme, strides, fused activation 7
/// Weights (input 1) and bias (input 2) must be constant operands.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the input's channel dim; M = (I * M) / I).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Rescale the bias quantization scale to match weights-scale * input-scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding signature: pads 3-6, strides 7-8, fused activation 10.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding signature: padding scheme 3, strides 4-5, fused activation 7.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel size from the permuted [ M, I, H, W ] weights: dims [2]=H, [3]=W.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        // Derive explicit padding values from the implicit scheme.
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    // Dynamic output tensors are validated later, once the shape is inferred.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
}
2615
Mike Kelly3c673942019-07-25 09:26:06 +01002616template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002617 typename HalOperation = typename HalPolicy::Operation,
2618 typename HalModel = typename HalPolicy::Model>
2619bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002620{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002621 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002622
2623 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2624 if (!input.IsValid())
2625 {
2626 return Fail("%s: Operation has invalid input", __func__);
2627 }
2628
Sadik Armagan98c0f662019-11-21 15:54:36 +00002629 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2630 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2631 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2632 {
2633 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2634 }
2635
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002636 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002637 if (!outputOperand)
2638 {
2639 return Fail("%s: Operation has invalid outputs", __func__);
2640 }
2641
2642 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002643
2644 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002645 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2646 {
2647 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2648 IsDequantizeSupported,
2649 data.m_Backends,
2650 isSupported,
2651 inputInfo,
2652 outputInfo);
2653 };
2654
2655 if(IsDynamicTensor(outputInfo))
2656 {
2657 isSupported = AreDynamicTensorsSupported();
2658 }
2659 else
2660 {
2661 validateFunc(outputInfo, isSupported);
2662 }
2663
Mike Kelly46272802019-08-14 17:00:48 +01002664 if (!isSupported)
2665 {
2666 return false;
2667 }
2668
2669 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2670 assert(layer != nullptr);
2671 input.Connect(layer->GetInputSlot(0));
2672
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002673 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002674}
2675
/// Converts an Android NN DIV operation into an ArmNN Division layer.
///
/// Inputs 0 and 1 are the operands (broadcast is handled via BroadcastTensor);
/// input 2 is the optional fused activation function.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    // Dynamic output tensors are validated later, once the shape is inferred.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    // Append the fused activation (if any) after the division.
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        // Insert reshape layers if the two inputs need broadcasting.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
    }
    return Fail("%s: ProcessActivation failed", __func__);
}
2748
2749template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002750 typename HalOperation = typename HalPolicy::Operation,
2751 typename HalModel = typename HalPolicy::Model>
2752bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002753{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002754 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002755
2756 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2757 if (!input.IsValid())
2758 {
2759 return Fail("%s: Operation has invalid inputs", __func__);
2760 }
2761
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002762 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002763 if (!outputOperand)
2764 {
2765 return Fail("%s: Operation has invalid outputs", __func__);
2766 }
2767
2768 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002769
2770 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002771 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2772 {
2773 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2774 IsFloorSupported,
2775 data.m_Backends,
2776 isSupported,
2777 input.GetTensorInfo(),
2778 outputInfo);
2779 };
2780
2781 if(!IsDynamicTensor(outputInfo))
2782 {
2783 validateFunc(outputInfo, isSupported);
2784 }
2785 else
2786 {
2787 isSupported = AreDynamicTensorsSupported();
2788 }
2789
Mike Kelly46272802019-08-14 17:00:48 +01002790 if (!isSupported)
2791 {
2792 return false;
2793 }
2794
2795 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2796 assert(layer != nullptr);
2797 input.Connect(layer->GetInputSlot(0));
2798
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002799 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002800}
2801
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a V1_0 operand can never
// be QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2806
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Returns true if the HAL 1.2 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2815
#ifdef ARMNN_ANDROID_NN_V1_3

// Returns true if the HAL 1.3 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2824
/// Outcome of DequantizeIfRequired.
enum class DequantizeStatus
{
    SUCCESS,          // Weights were dequantized; the result buffer is populated.
    NOT_REQUIRED,     // Weights are already constant (or no DEQUANTIZE producer found).
    INVALID_OPERAND   // The weights operand could not be read.
};

// { dequantized data, buffer size in bytes, tensor info, status }
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2833
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002834template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002835 typename HalOperation = typename HalPolicy::Operation,
2836 typename HalModel = typename HalPolicy::Model>
2837DequantizeResult DequantizeIfRequired(size_t operand_index,
2838 const HalOperation& operation,
2839 const HalModel& model,
2840 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002841{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002842 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002843
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002844 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002845 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002846 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002847 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002848 }
2849
2850 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2851 {
2852 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002853 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002854 }
2855
2856 const size_t weightsInputIndex = operation.inputs[operand_index];
2857
2858 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2859 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002860 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002861 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002862 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002863 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002864 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2865 {
2866 continue;
2867 }
2868
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002869 size_t outOpIndex = weightsInputIndex + 1;
2870 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002871 {
2872 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002873 }
2874
2875 if (outOpIndex != weightsInputIndex)
2876 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002877 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002878 }
2879
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002880 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002881 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002882
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002883 if (!IsQSymm8(*operand))
2884 {
2885 // Only supporting dequantize from QSYMM8 to FLOAT
2886 break;
2887 }
2888
2889 // Allocate a new buffer for the dequantized data and manually dequantize
2890 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2891 if (!startValue)
2892 {
2893 // Failed to get the operand address
2894 break;
2895 }
2896
2897 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2898 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002899 const float quantizationScale = operand->scale;
2900
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002901 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2902 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2903 {
2904 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002905 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002906 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2907 }
2908
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002909 // Construct tensor info for dequantized ConstTensor
2910 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2911 operand->dimensions.data(),
2912 armnn::DataType::Float32);
2913
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002914 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2915 std::move(tensorInfo),
2916 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002917 }
2918
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002919 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002920}
2921
2922template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002923 typename HalOperation = typename HalPolicy::Operation,
2924 typename HalModel = typename HalPolicy::Model>
2925ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2926 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002927 const ConversionData& data,
2928 size_t operandIndex,
2929 bool optional = false)
2930{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002931 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2932
2933 DequantizeStatus status = std::get<3>(dequantized);
2934 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002935 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002936 case DequantizeStatus::INVALID_OPERAND:
2937 {
2938 // return invalid const tensor pin
2939 return ConstTensorPin();
2940 }
2941 case DequantizeStatus::NOT_REQUIRED:
2942 {
2943 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2944 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2945 }
2946 case DequantizeStatus::SUCCESS:
2947 default:
2948 {
2949 return ConstTensorPin(
2950 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2951 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002952 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002953}
2954
2955
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Operation inputs: 0 = input tensor, 1 = weights, 2 = bias (1D), 3 = fused activation function.
// If the input has rank > 2 a Reshape layer is inserted to flatten it first.
// Returns true on success; false (via Fail) if the operation cannot be converted.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Weights go through the dequantizing path; bias is read as a plain constant tensor.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FlattenFullyConnectedInput throws if the input shape cannot be collapsed to 2D;
    // translate that into a conversion failure rather than letting the exception escape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Check the flattened-input / weights / output shapes are mutually consistent before
    // asking the backends whether they support the workload.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    // Backend support is validated now for static output shapes, or deferred (via
    // validateFunc, passed to SetupAndTrackLayerOutputSlot) for dynamic ones.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weights.GetInfo(),
                                   bias.GetInfo(),
                                   desc);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Rank > 2: insert a Reshape to flatten the input to the 2D shape computed above,
            // then feed the reshaped tensor into the FullyConnected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
3082
3083template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003084 typename HalOperation = typename HalPolicy::Operation,
3085 typename HalModel = typename HalPolicy::Model>
3086bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003087{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003088 using HalOperand = typename HalPolicy::Operand;
3089
Mike Kelly999e2092019-08-15 10:46:46 +01003090 if (operation.inputs.size() != 1)
3091 {
3092 return Fail("%s: Optional inputs are not supported", __func__);
3093 }
3094
Mike Kelly46272802019-08-14 17:00:48 +01003095 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3096 if (!input.IsValid())
3097 {
3098 return Fail("%s: Operation has invalid inputs", __func__);
3099 }
3100
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003101 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003102 if (!output)
3103 {
3104 return Fail("%s: Could not read output 0", __func__);
3105 }
3106
3107 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3108 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3109
Mike Kelly46272802019-08-14 17:00:48 +01003110 if (outputInfo.GetNumDimensions() != 4u)
3111 {
3112 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3113 }
3114
3115 armnn::L2NormalizationDescriptor desc;
3116 desc.m_DataLayout = armnn::DataLayout::NHWC;
3117
3118 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003119 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3120 {
3121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3122 IsL2NormalizationSupported,
3123 data.m_Backends,
3124 isSupported,
3125 inputInfo,
3126 outputInfo,
3127 desc);
3128 };
3129
3130 if(!IsDynamicTensor(outputInfo))
3131 {
3132 validateFunc(outputInfo, isSupported);
3133 }
3134 else
3135 {
3136 isSupported = AreDynamicTensorsSupported();
3137 }
3138
Mike Kelly46272802019-08-14 17:00:48 +01003139 if (!isSupported)
3140 {
3141 return false;
3142 }
3143
3144 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3145 assert(layer != nullptr);
3146 input.Connect(layer->GetInputSlot(0));
3147
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003148 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003149}
3150
// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel, local-brightness method).
// Operation inputs: 0 = input tensor (rank-4, NHWC), 1 = radius (INT32),
// 2 = bias k, 3 = alpha, 4 = beta (all FLOAT32).
// Returns true on success; false (via Fail) if the operation cannot be converted.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // Exactly five inputs expected; the optional axis variant is not handled.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius (as m_NormSize, adjusted below) and the k/alpha/beta parameters.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    // Backend support is validated now for static output shapes, or deferred
    // (via validateFunc) for dynamic ones.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsNormalizationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3237
3238template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003239 typename HalOperation = typename HalPolicy::Operation,
3240 typename HalModel = typename HalPolicy::Model>
3241bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003242{
Mike Kelly46272802019-08-14 17:00:48 +01003243 armnn::ActivationDescriptor desc;
3244 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3245
3246 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3247}
3248
// Converts an Android NN MEAN operation into an ArmNN Mean layer.
// Operation inputs: 0 = input tensor, 1 = axis tensor (INT32, may contain
// negative indices), 2 = keep_dims flag (INT32, >0 means keep reduced dims).
// Returns true on success; false (via Fail) if the operation cannot be converted.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank wraps negative axis indices into [0, rank); using a std::set
    // also yields the axes in ascending order.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    // Backend support is validated now for static output shapes, or deferred
    // (via validateFunc) for dynamic ones.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3334
3335template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003336 typename HalOperation = typename HalPolicy::Operation,
3337 typename HalModel = typename HalPolicy::Model>
3338bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003339{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003340 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003341
3342 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3343 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3344
3345 if (!input0.IsValid() || !input1.IsValid())
3346 {
3347 return Fail("%s: Operation has invalid inputs", __func__);
3348 }
3349
3350 // The FuseActivation parameter is always the input index 2
3351 // and it should be optional
3352 ActivationFn activationFunction;
3353 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3354 {
3355 return Fail("%s: Operation has invalid inputs", __func__);
3356 }
3357
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003358 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003359
3360 if (outputOperand == nullptr)
3361 {
3362 return false;
3363 }
3364
3365 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003366
3367 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003368 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3369 {
3370 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3371 IsMultiplicationSupported,
3372 data.m_Backends,
3373 isSupported,
3374 input0.GetTensorInfo(),
3375 input1.GetTensorInfo(),
3376 outputInfo);
3377 };
3378
3379 if(!IsDynamicTensor(outputInfo))
3380 {
3381 validateFunc(outputInfo, isSupported);
3382 }
3383 else
3384 {
3385 isSupported = AreDynamicTensorsSupported();
3386 }
3387
Mike Kelly46272802019-08-14 17:00:48 +01003388 if (!isSupported)
3389 {
3390 return false;
3391 }
3392
3393 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3394 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3395
3396 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3397 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3398
3399 if (endLayer != nullptr)
3400 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003401 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003402 if (!isReshapeSupported)
3403 {
3404 return false;
3405 }
3406
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003407 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003408 }
3409 else
3410 {
3411 return Fail("%s: ProcessActivation failed", __func__);
3412 }
3413}
3414
3415template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003416 typename HalOperation = typename HalPolicy::Operation,
3417 typename HalModel = typename HalPolicy::Model>
3418bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003419{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003420 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003421
Mike Kelly3c673942019-07-25 09:26:06 +01003422 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3423 if (!input.IsValid())
3424 {
3425 return Fail("%s: Operation has invalid inputs", __func__);
3426 }
3427
3428 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3429 unsigned int rank = inputInfo.GetNumDimensions();
3430
3431 armnn::PadDescriptor descriptor;
3432 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3433 {
3434 return Fail("%s: Could not convert paddings", __func__);
3435 }
3436
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003437 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3438 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003439 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3440 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3441 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003442 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003443 {
3444 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3445 }
3446
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003447 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003448 if (!output)
3449 {
3450 return Fail("%s: Could not read output", __func__);
3451 }
3452
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003453 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003454
3455 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003456 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3457 {
3458 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3459 IsPadSupported,
3460 data.m_Backends,
3461 isSupported,
3462 inputInfo,
3463 outputInfo,
3464 descriptor);
3465 };
3466
3467 if(!IsDynamicTensor(outputInfo))
3468 {
3469 validateFunc(outputInfo, isSupported);
3470 }
3471 else
3472 {
3473 isSupported = AreDynamicTensorsSupported();
3474 }
3475
Mike Kelly3c673942019-07-25 09:26:06 +01003476 if (!isSupported)
3477 {
3478 return false;
3479 }
3480
3481 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3482 assert(layer != nullptr);
3483 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003484
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003485 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003486}
3487
// Converts an Android NN RESHAPE operation into an ArmNN Reshape layer.
// Operation inputs: 0 = input tensor, 1 = one-dimensional INT32 tensor holding the
// requested shape (may contain -1, resolved by AndroidNN's reshapePrepare()).
// Returns true on success; false (via Fail) if the operation cannot be converted.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    // Backend support is validated now for static output shapes, or deferred
    // (via validateFunc) for dynamic ones.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outputInfo,
                                   reshapeDescriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3572
3573template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003574 typename HalOperation = typename HalPolicy::Operation,
3575 typename HalModel = typename HalPolicy::Model>
3576bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003577{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003578 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003579
Mike Kelly0a879362019-07-29 16:56:31 +01003580 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3581 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3582
3583 if (!input0.IsValid() || !input1.IsValid())
3584 {
3585 return Fail("%s: Operation has invalid inputs", __func__);
3586 }
3587
3588 // The FuseActivation parameter is always the input index 2
3589 // and it should be optional
3590 ActivationFn activationFunction;
3591 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3592 {
3593 return Fail("%s: Operation has invalid inputs", __func__);
3594 }
3595
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003596 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003597 if (!output)
3598 {
3599 return Fail("%s: Could not read output 0", __func__);
3600 }
3601
3602 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003603
3604 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003605 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3606 {
3607 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3608 IsSubtractionSupported,
3609 data.m_Backends,
3610 isSupported,
3611 input0.GetTensorInfo(),
3612 input1.GetTensorInfo(),
3613 outputInfo);
3614 };
3615
3616 if(IsDynamicTensor(outputInfo))
3617 {
3618 isSupported = AreDynamicTensorsSupported();
3619 }
3620 else
3621 {
3622 validateFunc(outputInfo, isSupported);
3623 }
3624
Mike Kelly0a879362019-07-29 16:56:31 +01003625 if (!isSupported)
3626 {
3627 return false;
3628 }
3629
3630 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3631 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3632
3633 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3634 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3635
3636 if (endLayer)
3637 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003638 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003639 if (!isReshapeSupported)
3640 {
3641 return false;
3642 }
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003643 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly0a879362019-07-29 16:56:31 +01003644 }
3645
3646 return Fail("%s: ProcessActivation failed", __func__);
3647}
3648
Finn Williams23b87b32019-07-30 11:44:05 +01003649template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003650 typename HalOperation = typename HalPolicy::Operation,
3651 typename HalModel = typename HalPolicy::Model>
3652bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003653{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003654 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003655
3656 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3657 if (!input.IsValid())
3658 {
3659 return Fail("%s: Operation has invalid inputs", __func__);
3660 }
3661
3662 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3663 unsigned int rank = inputInfo.GetNumDimensions();
3664 if (rank > 4)
3665 {
3666 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3667 }
3668
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003669 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003670 if (!output)
3671 {
3672 return Fail("%s: Could not read output 0", __func__);
3673 }
Mike Kelly46272802019-08-14 17:00:48 +01003674 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3675 {
3676 return Fail("%s: Dynamic output tensors are not supported", __func__);
3677 }
3678
3679 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3680 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003681 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003682
3683 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3684
3685 std::vector<int32_t> axis;
3686 if (!axisOperand)
3687 {
3688 axis.assign(dimensionSequence,
3689 dimensionSequence + rank);
3690 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003691 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003692 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003693 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003694 }
3695
3696 std::vector<uint32_t> outputDims;
3697 for (unsigned int i = 0; i < rank; i++)
3698 {
3699 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3700 auto currentDimension = inputInfo.GetShape()[i];
3701 if (skipSqueeze || currentDimension != 1)
3702 {
3703 outputDims.push_back(currentDimension);
3704 }
3705 }
3706
3707 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3708
3709 armnn::TensorInfo outputInfo = inputInfo;
3710 outputInfo.SetShape(outShape);
3711
3712 armnn::ReshapeDescriptor reshapeDesc;
3713 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3714
3715 bool isSupported = false;
3716 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3717 IsReshapeSupported,
3718 data.m_Backends,
3719 isSupported,
3720 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003721 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003722 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003723
Mike Kelly46272802019-08-14 17:00:48 +01003724 if (!isSupported)
3725 {
3726 return false;
3727 }
3728
3729 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3730 assert(layer != nullptr);
3731 input.Connect(layer->GetInputSlot(0));
3732
3733 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3734}
3735
3736template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003737 typename HalOperation = typename HalPolicy::Operation,
3738 typename HalModel = typename HalPolicy::Model>
3739bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003740{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003741 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003742
3743 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3744 if (!input.IsValid())
3745 {
3746 return Fail("%s: Operation has invalid inputs", __func__);
3747 }
3748
3749 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3750 unsigned int rank = inputInfo.GetNumDimensions();
3751 if (rank > 4)
3752 {
3753 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3754 }
3755
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003756 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003757 if (!output)
3758 {
3759 return Fail("%s: Could not read output 0", __func__);
3760 }
3761
3762 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003763
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003764 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3765 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3766 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003767
3768 std::vector<int32_t> beginValues;
3769 std::vector<int32_t> endValues;
3770 std::vector<int32_t> stridesValues;
3771
3772 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003773 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003774 {
3775 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3776 {
3777 return false;
3778 }
3779
3780 if (operandValues.size() != rank)
3781 {
3782 return false;
3783 }
3784
3785 return true;
3786 };
3787
3788 if (!ValidateInputOperands(*beginOperand, beginValues)
3789 || !ValidateInputOperands(*endOperand, endValues)
3790 || !ValidateInputOperands(*stridesOperand, stridesValues))
3791 {
3792 return Fail("%s: Operation has invalid input operand", __func__);
3793 }
3794
3795 // Stride cannot have value '0'
3796 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3797 {
3798 return Fail("%s: Stride must be non-zero value.", __func__);
3799 }
3800
3801 armnn::StridedSliceDescriptor descriptor;
3802 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3803 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3804 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3805 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3806
3807 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3808 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3809 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3810 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3811 {
3812 return Fail("%s: Operation has invalid inputs", __func__);
3813 }
3814
3815 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003816 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3817 {
3818 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3819 IsStridedSliceSupported,
3820 data.m_Backends,
3821 isSupported,
3822 inputInfo,
3823 outputInfo,
3824 descriptor);
3825 };
3826
3827 if(IsDynamicTensor(outputInfo))
3828 {
3829 isSupported = AreDynamicTensorsSupported();
3830 }
3831 else
3832 {
3833 validateFunc(outputInfo, isSupported);
3834 }
3835
Mike Kelly46272802019-08-14 17:00:48 +01003836 if (!isSupported)
3837 {
3838 return false;
3839 }
3840
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003841 // Check if slice can fit in a inferred output
3842 armnn::TensorShape inputShape = inputInfo.GetShape();
3843 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3844 {
3845 int stride = descriptor.m_Stride[i];
3846 int start = descriptor.GetStartForAxis(inputShape, i);
3847 int stop = descriptor.GetStopForAxis(inputShape, i, start);
3848
3849 if (descriptor.m_ShrinkAxisMask & (1 << i))
3850 {
3851 // If the difference between the start point and the end point of the slice on an axis being shrunk
3852 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3853 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3854 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3855 {
3856 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3857 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003858
3859 if(stride < 0)
3860 {
3861 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3862 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003863 }
3864 }
3865
Mike Kelly46272802019-08-14 17:00:48 +01003866 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3867 assert(layer != nullptr);
3868 input.Connect(layer->GetInputSlot(0));
3869
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003870 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003871}
3872
3873template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003874 typename HalOperation = typename HalPolicy::Operation,
3875 typename HalModel = typename HalPolicy::Model>
3876bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003877{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003878 using HalOperand = typename HalPolicy::Operand;
Kevin May63460512020-08-20 10:22:53 +01003879 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01003880
3881 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3882 if (!input.IsValid())
3883 {
3884 return Fail("%s: Operation has invalid inputs", __func__);
3885 }
3886
3887 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3888 unsigned int rank = inputInfo.GetNumDimensions();
3889 if (rank > 4)
3890 {
3891 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3892 }
3893
3894 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3895 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003896 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003897
3898 std::vector<int32_t> perm(rank);
Kevin May63460512020-08-20 10:22:53 +01003899 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01003900 {
Mike Kelly46272802019-08-14 17:00:48 +01003901 for (unsigned int i = rank; i > 0; i--)
3902 {
3903 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3904 }
3905 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003906 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003907 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003908 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003909 }
3910
3911 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3912
Mike Kelly4a956582020-02-28 10:32:09 +00003913 armnn::TransposeDescriptor transposeDesc;
3914 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003915
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003916 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003917 if (!output)
3918 {
3919 return Fail("%s: Could not read output 0", __func__);
3920 }
3921
3922 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3923
3924 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003925 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3926 {
3927 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3928 IsTransposeSupported,
3929 data.m_Backends,
3930 isSupported,
3931 inputInfo,
3932 outputInfo,
3933 transposeDesc);
3934 };
3935
3936 if(IsDynamicTensor(outputInfo))
3937 {
3938 isSupported = AreDynamicTensorsSupported();
3939 }
3940 else
3941 {
3942 validateFunc(outputInfo, isSupported);
3943 }
3944
Mike Kelly46272802019-08-14 17:00:48 +01003945 if (!isSupported)
3946 {
3947 return false;
3948 }
3949
Mike Kelly4a956582020-02-28 10:32:09 +00003950 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003951 assert(layer != nullptr);
3952 input.Connect(layer->GetInputSlot(0));
3953
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003954 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003955}
3956
3957template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003958 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003959 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003960 typename HalModel = typename HalPolicy::Model>
3961bool ConvertBatchToSpaceNd(const HalOperation& operation,
3962 const HalModel& model,
3963 ConversionData& data)
3964{
Finn Williams23b87b32019-07-30 11:44:05 +01003965
3966 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3967 if (!input.IsValid())
3968 {
3969 return Fail("%s: Operation has invalid inputs", __func__);
3970 }
3971
3972 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3973 if (!output)
3974 {
3975 return Fail("%s: Could not read output 0", __func__);
3976 }
3977
3978 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01003979
3980 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3981 if (!blockOperand)
3982 {
3983 return Fail("%s: Could not read input 1", __func__);
3984 }
3985
3986 // Convert the block operand to int32
3987 std::vector<int32_t> block;
3988 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3989 {
3990 return Fail("%s: Input 1 has invalid values", __func__);
3991 }
3992
3993 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3994
3995 unsigned int rank = inputInfo.GetNumDimensions();
3996 if (rank != 4)
3997 {
3998 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3999 }
4000
4001 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4002 {
4003 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4004 " greater than or equal to 1", __func__);
4005 }
4006
4007 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4008 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4009 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4010
Kevin May42477c12020-03-26 13:34:14 +00004011 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004012 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004013 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004014 }
4015 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4016 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4017
4018 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004019 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4020 {
4021 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4022 IsBatchToSpaceNdSupported,
4023 data.m_Backends,
4024 isSupported,
4025 inputInfo,
4026 outputInfo,
4027 batchToSpaceNdDesc);
4028 };
4029
4030 if(!IsDynamicTensor(outputInfo))
4031 {
4032 validateFunc(outputInfo, isSupported);
4033 }
4034 else
4035 {
4036 isSupported = AreDynamicTensorsSupported();
4037 }
4038
4039
Finn Williams23b87b32019-07-30 11:44:05 +01004040 if (!isSupported)
4041 {
4042 return false;
4043 }
4044
4045 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
4046 assert(layer != nullptr);
4047 input.Connect(layer->GetInputSlot(0));
4048
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004049 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004050}
Mike Kelly0a879362019-07-29 16:56:31 +01004051
Finn Williamsd74c5052019-07-30 17:06:00 +01004052template<typename HalPolicy,
4053 typename HalOperation = typename HalPolicy::Operation,
4054 typename HalOperand = typename HalPolicy::Operand,
4055 typename HalModel = typename HalPolicy::Model>
4056bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4057{
4058 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4059 if (!input.IsValid())
4060 {
4061 return Fail("%s: Operation has invalid inputs", __func__);
4062 }
4063
4064 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4065 unsigned int rank = inputInfo.GetNumDimensions();
4066 unsigned int spatialDim = rank - 2;
4067
4068 if (rank != 4)
4069 {
4070 Fail("%s: Only inputs with rank 4 are supported", __func__);
4071 }
4072
4073 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4074 if (!output)
4075 {
4076 return Fail("%s: Could not read output 0", __func__);
4077 }
4078
4079 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004080
4081 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4082 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4083
4084 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4085 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4086 {
4087 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4088 }
4089
4090 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004091 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4092 {
4093 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4094 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004095 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4096 {
4097 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4098 }
4099
4100 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4101 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4102 {
4103 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4104 }
4105
4106 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4107 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004108 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4109 {
4110 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4111 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004112 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4113 {
4114 int paddingBeforeInput = paddings[i];
4115 int paddingAfterInput = paddings[i + 1];
4116 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4117 {
4118 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4119 }
4120
4121 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4122 }
4123
4124 armnn::SpaceToBatchNdDescriptor descriptor;
4125 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4126 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4127 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4128
Kevin May42477c12020-03-26 13:34:14 +00004129 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004130 {
4131 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4132 }
4133
4134 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004135 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4136 {
4137 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4138 IsSpaceToBatchNdSupported,
4139 data.m_Backends,
4140 isSupported,
4141 inputInfo,
4142 outputInfo,
4143 descriptor);
4144 };
4145
4146 if(IsDynamicTensor(outputInfo))
4147 {
4148 isSupported = AreDynamicTensorsSupported();
4149 }
4150 else
4151 {
4152 validateFunc(outputInfo, isSupported);
4153 }
4154
Finn Williamsd74c5052019-07-30 17:06:00 +01004155 if (!isSupported)
4156 {
4157 return false;
4158 }
4159
4160 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4161 assert(layer != nullptr);
4162 input.Connect(layer->GetInputSlot(0));
4163
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004164 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004165}
4166
saoste01b8471482018-10-10 09:44:51 +01004167} // namespace armnn_driver