blob: f2f95ac89230cab9bba1c1a3a0444e3d73a7b384 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
// State shared across the conversion of a single model: the candidate
// backends (in preference order), the ArmNN network being built, the output
// slot created for each model operand, and the memory pools backing
// constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    // Indexed by operand index; nullptr until the producing layer is created.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
// Wraps the output slot that will feed a layer input, together with a
// validity flag and the tensor info needed to connect it later.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Disconnects the wrapped output slot from the given input slot.
    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot; // non-owning
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
74
// Holds a ConstTensor built from a constant model operand, keeping alive any
// swizzled copy of the data the tensor may reference.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation applied to the data; a non-identity mapping copies the
    //                 data into m_SwizzledTensorData (see below).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: the pin may own swizzled data, so copying is disallowed.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
109
110} // namespace armnn_driver
111
112///
113/// Utility functions
114///
115
116namespace
117{
118
119using namespace armnn_driver;
120using namespace android::nn;
121
// Convenience function to log the reason for failing to convert a model.
// Arguments are printf-style and forwarded to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
130
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order and sets 'supported' to true for the first
// backend whose Is*Supported check passes. Logs (via ALOGD) the reason each
// backend rejects the layer, any backend that is not registered, and the case
// where no backend supports the layer. An InvalidArgumentException thrown by
// the support check is rethrown with added context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100174
// Builds an armnn::TensorShape from the dimensions vector of a HAL operand.
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
180
// Support within the 1.0 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32      ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL                           ||
           type == V1_2::OperandType::TENSOR_BOOL8                   ||
           type == V1_2::OperandType::TENSOR_FLOAT16                 ||
           type == V1_2::OperandType::TENSOR_FLOAT32                 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL                           ||
           type == V1_3::OperandType::TENSOR_BOOL8                   ||
           type == V1_3::OperandType::TENSOR_FLOAT16                 ||
           type == V1_3::OperandType::TENSOR_FLOAT32                 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED    ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif
224
// Returns whether the operand is of BOOL type; BOOL only exists from HAL 1.2 onwards,
// so the 1.0 overload is always false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// Checks if an operand comes from HAL version 1.2 or later (a 1.0 operand does not).
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand; 1.3 operands qualify.
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
264
// Inserts a Reshape layer that reshapes 'inputLayer' to 'reshapeInfo', and
// returns the new layer with its output tensor info already set.
template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}
282
// Connects two inputs to the binary 'startLayer', broadcasting if necessary:
// when the inputs differ in rank, the lower-rank tensor is reshaped with
// leading degenerate (size-1) dimensions before being connected.
// @return false if the required Reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Right-align the smaller shape into a rank-maxInputDimensions shape padded with 1s.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
375
// Computes explicit head/tail padding for one spatial dimension from the
// implicit Android NN padding scheme (SAME/VALID).
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Overload that also accounts for kernel dilation (HAL 1.2+ convolutions).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Padding for transpose convolution, driven by the desired output size
// rather than the input size.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
409
// Converts a HAL 1.0 operand into the Android NN runtime Shape structure
// (type, dimensions and quantization parameters) used by OperationsUtils.
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// HAL 1.2 overload of GetOperandShape.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// HAL 1.3 overload of GetOperandShape.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
447
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Tolerate up to 1% relative mismatch before overwriting the bias scale.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
487
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (cyclic rotations of the three dimensions)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100495
// Inserts a Transpose layer that permutes 'input' according to 'mappings' and
// returns the new layer; its output tensor info is derived from the input's.
template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    ARMNN_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
514
arovir01b0717b52018-09-05 17:03:25 +0100515bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
516 const armnn::TensorShape & outputShape,
517 uint32_t concatDim)
518{
519 // Validate the output shape is correct given the input shapes (which have just been validated)
520 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
521 if (outputShape.GetNumDimensions() != numDimensions)
522 {
523 return Fail("%s: Output shape has wrong number of dimensions", __func__);
524 }
525
526 unsigned int outputSizeAlongConcatenatedDimension = 0;
527 for (unsigned int i = 0; i < inputShapes.size(); i++)
528 {
529 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
530 }
531
532 for (unsigned int i = 0; i < numDimensions; ++i)
533 {
534 if (i == concatDim)
535 {
536 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
537 {
538 return Fail(
539 "%s: Invalid output shape for dimension %d (%d != %d)",
540 __func__,
541 i,
542 outputShape[i],
543 outputSizeAlongConcatenatedDimension);
544 }
545 }
546 else
547 {
548 if (outputShape[i] != inputShapes[0][i])
549 {
550 return Fail("%s: Invalid output shape", __func__);
551 }
552 }
553 }
554
555 return true;
556}
557
558bool RequiresReshape(armnn::TensorShape & inputShape)
559{
560 return inputShape.GetNumDimensions() < 3;
561}
562
// Inserts a Transpose ("swizzle") layer in front of each input and rewrites
// the handles/shapes in place to refer to the transposed outputs.
// A no-op when 'mapping' is the 4D identity permutation.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
583
// Checks that a Transpose with 'mapping' is supported by some backend for
// every input, then applies the transposes via SwizzleInputs.
// A no-op (returning true) when 'mapping' is the 4D identity permutation.
// @return false if any input's transpose is unsupported (no layers are added).
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
618
619
narpra01f176d5a2018-11-18 20:17:48 +0000620bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
621 int32_t & concatDimension,
622 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100623{
narpra01f176d5a2018-11-18 20:17:48 +0000624 bool needPermute = false;
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100625 ARMNN_ASSERT(numberOfDimensions >= 3);
arovir01b0717b52018-09-05 17:03:25 +0100626
627 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000628 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
629 // or along dimension 0 or 2 for a 3-D tensor.
630 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100631 {
narpra01f176d5a2018-11-18 20:17:48 +0000632 concatDimension = 1;
633 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
634 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100635 }
narpra01f176d5a2018-11-18 20:17:48 +0000636 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100637 {
narpra01f176d5a2018-11-18 20:17:48 +0000638 concatDimension = 0;
639 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
640 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100641 }
narpra01f176d5a2018-11-18 20:17:48 +0000642 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100643}
644
645} // anonymous namespace
646
647namespace armnn_driver
648{
649
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
660
661} // namespace armnn_driver
662
663///
664/// Utility templates
665///
666
667namespace armnn_driver
668{
669
670using namespace android::nn;
671
// Returns a pointer to the operand feeding input 'inputIndex' of 'operation',
// or nullptr if the index is out of bounds.
// @param failOnIndexOutOfBounds When true, an out-of-bounds index is also logged via Fail().
template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

// Returns a pointer to the operand written by output 'outputIndex' of
// 'operation', or nullptr (with a logged failure) if the index is out of bounds.
template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
714
// Returns a read-only pointer to the raw data of a constant operand, or
// nullptr when the data cannot be obtained.
// @param optional When true, a NO_VALUE lifetime is treated as a valid absent
//                 optional input (nullptr returned without logging a failure).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
761
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100762template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100763 typename HalOperation = typename HalPolicy::Operation,
764 typename HalModel = typename HalPolicy::Model,
765 typename HalOperandType = typename HalPolicy::OperandType>
766bool GetOperandType(const HalOperation& operation,
767 uint32_t inputIndex,
768 const HalModel& model,
769 HalOperandType& type)
770{
771 using HalOperand = typename HalPolicy::Operand;
772
773 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
774 if (!operand)
775 {
776 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
777 }
778
779 type = operand->type;
780 return true;
781}
782
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
/// True when the operand's value is fixed for the lifetime of the model.
/// NO_VALUE also counts: an absent optional operand can never change at runtime.
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
795
796template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100797 typename HalOperand = typename HalPolicy::Operand,
798 typename HalModel = typename HalPolicy::Model>
799ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
800 const HalModel& model,
801 const ConversionData& data,
802 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
803 const armnn::TensorShape* overrideTensorShape = nullptr,
804 bool optional = false)
805{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100806 if (!IsOperandTypeSupportedForTensors(operand.type))
807 {
808 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
809 return ConstTensorPin();
810 }
811
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000812 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100813 {
814 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
815 return ConstTensorPin();
816 }
817
818 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
819 if (!valueStart)
820 {
821 if (optional)
822 {
823 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
824 return ConstTensorPin(true);
825 }
826 // mandatory tensor with no values
827 Fail("%s: failed to get operand address", __func__);
828 return ConstTensorPin();
829 }
830
831 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Teresa Charlin02dce092019-11-11 17:06:23 +0000832 // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
833 if (tensorInfo.HasPerAxisQuantization())
834 {
835 tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
836 }
837
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100838 if (overrideTensorShape != nullptr)
839 {
840 tensorInfo.SetShape(*overrideTensorShape);
841 }
842 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
843}
844
845template<typename HalPolicy,
846 typename HalOperation = typename HalPolicy::Operation,
847 typename HalModel = typename HalPolicy::Model>
848ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
849 uint32_t inputIndex,
850 const HalModel& model,
851 const ConversionData& data,
852 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
853 const armnn::TensorShape* overrideTensorShape = nullptr,
854 bool optional = false)
855{
856 using HalOperand = typename HalPolicy::Operand;
857
858 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
859 if (!operand)
860 {
861 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
862 return ConstTensorPin();
863 }
864 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
865 model,
866 data,
867 dimensionMappings,
868 overrideTensorShape,
869 optional);
870}
871
872template<typename HalPolicy,
873 typename OutputType,
874 typename HalOperandType = typename HalPolicy::OperandType,
875 typename HalOperation = typename HalPolicy::Operation,
876 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100877bool GetInputScalar(const HalOperation& operation,
878 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100879 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100880 OutputType& outValue,
881 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100882 const ConversionData& data,
883 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100884{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100885 using HalOperand = typename HalPolicy::Operand;
886
887 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100888 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100889 {
890 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
891 }
892
Sadik Armagan813f2302020-05-19 14:10:30 +0100893 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100894 {
895 return Fail("%s: unexpected operand type: %s (should be %s)",
896 __func__, toString(operand->type).c_str(), toString(type).c_str());
897 }
898
Sadik Armagan813f2302020-05-19 14:10:30 +0100899 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100900 {
901 return Fail("%s: incorrect operand location length: %i (should be %i)",
902 __func__, operand->location.length, sizeof(OutputType));
903 }
904
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100905 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100906 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100907 {
908 return Fail("%s: failed to get address for operand", __func__);
909 }
910
Sadik Armagan813f2302020-05-19 14:10:30 +0100911 if(!optional)
912 {
913 outValue = *(static_cast<const OutputType*>(valueAddress));
914 }
915
arovir01b0717b52018-09-05 17:03:25 +0100916 return true;
917}
918
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100919template<typename HalPolicy,
920 typename HalOperation = typename HalPolicy::Operation,
921 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100922bool GetInputInt32(const HalOperation& operation,
923 uint32_t inputIndex,
924 int32_t& outValue,
925 const HalModel& model,
926 const ConversionData& data)
927{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100928 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100929}
930
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100931template<typename HalPolicy,
932 typename HalOperation = typename HalPolicy::Operation,
933 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100934bool GetInputFloat32(const HalOperation& operation,
935 uint32_t inputIndex,
936 float& outValue,
937 const HalModel& model,
938 const ConversionData& data)
939{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100940 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100941}
942
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100943template<typename HalPolicy,
944 typename HalOperation = typename HalPolicy::Operation,
945 typename HalOperandType = typename HalPolicy::OperandType,
946 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100947bool GetInputActivationFunctionImpl(const HalOperation& operation,
948 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100949 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100950 ActivationFn& outActivationFunction,
951 const HalModel& model,
952 const ConversionData& data)
953{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100954 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100955 {
956 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
957 __func__,
958 toString(type).c_str(),
959 toString(OperandType::INT32).c_str(),
960 toString(OperandType::TENSOR_INT32).c_str());
961 }
962
963 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100964 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100965 {
966 return Fail("%s: failed to get activation input value", __func__);
967 }
968 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
969 return true;
970}
971
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100972template<typename HalPolicy,
973 typename HalOperation = typename HalPolicy::Operation,
974 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100975bool GetInputActivationFunction(const HalOperation& operation,
976 uint32_t inputIndex,
977 ActivationFn& outActivationFunction,
978 const HalModel& model,
979 const ConversionData& data)
980{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981 return GetInputActivationFunctionImpl<HalPolicy>(operation,
982 inputIndex,
983 HalPolicy::OperandType::INT32,
984 outActivationFunction,
985 model,
986 data);
arovir01b0717b52018-09-05 17:03:25 +0100987}
988
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100989template<typename HalPolicy,
990 typename HalOperation = typename HalPolicy::Operation,
991 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100992bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
993 uint32_t inputIndex,
994 ActivationFn& outActivationFunction,
995 const HalModel& model,
996 const ConversionData& data)
997{
998 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100999 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1000 inputIndex,
1001 HalPolicy::OperandType::INT32,
1002 outActivationFunction,
1003 model,
1004 data);
arovir01b0717b52018-09-05 17:03:25 +01001005}
1006
1007
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001008template<typename HalPolicy,
1009 typename HalOperation = typename HalPolicy::Operation,
1010 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001011bool GetOptionalInputActivation(const HalOperation& operation,
1012 uint32_t inputIndex,
1013 ActivationFn& activationFunction,
1014 const HalModel& model,
1015 const ConversionData& data)
1016{
1017 if (operation.inputs.size() <= inputIndex)
1018 {
1019 activationFunction = ActivationFn::kActivationNone;
1020 }
1021 else
1022 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001023 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001024 {
1025 return Fail("%s: Operation has invalid inputs", __func__);
1026 }
1027 }
1028 return true;
1029}
1030
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001031template<typename HalPolicy,
1032 typename ConvolutionDescriptor,
1033 typename HalOperation = typename HalPolicy::Operation,
1034 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001035bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1036 uint32_t dilationXIndex,
1037 ConvolutionDescriptor& descriptor,
1038 const HalModel& model,
1039 const ConversionData& data)
1040{
1041 bool success = true;
1042 if (operation.inputs.size() >= dilationXIndex + 2)
1043 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001044 success &= GetInputScalar<HalPolicy>(operation,
1045 dilationXIndex,
1046 HalPolicy::OperandType::INT32,
1047 descriptor.m_DilationX,
1048 model,
1049 data);
1050 success &= GetInputScalar<HalPolicy>(operation,
1051 dilationXIndex + 1,
1052 HalPolicy::OperandType::INT32,
1053 descriptor.m_DilationY,
1054 model,
1055 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001056 }
1057
1058 return success;
1059}
1060
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001061template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001062 typename HalOperation = typename HalPolicy::Operation,
1063 typename HalModel = typename HalPolicy::Model>
1064bool GetOptionalBool(const HalOperation& operation,
1065 uint32_t inputIndex,
1066 const HalModel& model,
1067 const ConversionData& data)
1068{
1069 using HalOperand = typename HalPolicy::Operand;
1070
1071 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1072 if (!operand)
1073 {
1074 return false;
1075 }
1076
1077 if (!IsBool(*operand))
1078 {
1079 return false;
1080 }
1081
1082 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1083 if (!valueAddress)
1084 {
1085 return false;
1086 }
1087
1088 if (*(static_cast<const bool*>(valueAddress)))
1089 {
1090 return true;
1091 }
1092 else
1093 {
1094 return false;
1095 }
1096}
1097
1098template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001099 typename HalOperand = typename HalPolicy::Operand,
1100 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001101bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001102 std::vector<int32_t>& outValues,
1103 const HalModel& model,
1104 const ConversionData& data)
1105{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001106 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001107 {
1108 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1109 }
1110
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001111 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001112 if (!startAddress)
1113 {
1114 return Fail("%s: failed to get operand address", __func__, operand.type);
1115 }
1116
1117 // Check number of bytes is sensible
1118 const uint32_t numBytes = operand.location.length;
1119 if (numBytes % sizeof(int32_t) != 0)
1120 {
1121 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1122 __func__, numBytes, sizeof(int32_t));
1123 }
1124
1125 outValues.resize(numBytes / sizeof(int32_t));
1126 memcpy(outValues.data(), startAddress, numBytes);
1127 return true;
1128}
1129
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001130template<typename HalPolicy,
1131 typename HalOperation = typename HalPolicy::Operation,
1132 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001133bool GetInputPaddingScheme(const HalOperation& operation,
1134 uint32_t inputIndex,
1135 PaddingScheme& outPaddingScheme,
1136 const HalModel& model,
1137 const ConversionData& data)
1138{
1139 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001140 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001141 {
1142 return Fail("%s: failed to get padding scheme input value", __func__);
1143 }
1144
1145 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1146 return true;
1147}
1148
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Resolves input 'inputIndex' of 'operation' to a LayerInputHandle, dispatching on
/// the operand's lifetime: model inputs / temporaries map to an existing ArmNN output
/// slot; constants become a new ArmNN Constant layer. Returns an invalid handle on
/// any failure (missing operand, unsupported type, dynamic shape, unsupported
/// lifetime, or backend rejection).
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The chosen backends must also be able to host the constant.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1261
Kevin May42477c12020-03-26 13:34:14 +00001262
#ifdef ARMNN_ANDROID_NN_V1_3
/// HAL 1.3 overload of ConvertToLayerInputHandle. Identical in structure to the
/// generic version above, but uses the 1.3 lifetimes SUBGRAPH_INPUT/SUBGRAPH_OUTPUT
/// (which replaced MODEL_INPUT/MODEL_OUTPUT in the NNAPI 1.3 HAL).
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The chosen backends must also be able to host the constant.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1375
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001376template<typename HalPolicy,
1377 typename HalOperation = typename HalPolicy::Operation,
1378 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001379bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1380 uint32_t operationOutputIndex,
1381 armnn::IConnectableLayer& layer,
1382 uint32_t layerOutputIndex,
1383 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +01001384 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001385 const armnn::TensorInfo* overrideOutputInfo = nullptr,
1386 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001387{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001388 using HalOperand = typename HalPolicy::Operand;
1389
1390 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001391 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1392 {
1393 return false;
1394 }
1395
1396 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1397
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001398 if (overrideOutputInfo == nullptr)
1399 {
1400 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1401 }
1402 else
1403 {
1404 outputSlot.SetTensorInfo(*overrideOutputInfo);
1405 }
1406
1407 // Type one dynamic tensors require the previous layer's output shape for inference
1408 if (!layer.GetInputSlot(0).GetConnection() &&
1409 IsDynamicTensor(outputSlot.GetTensorInfo()))
1410 {
1411 return false;
1412 }
1413
Finn Williamsa4983ce2020-07-23 12:55:12 +01001414 bool isSupported = false;
1415 if (validateFunc &&
1416 layer.GetInputSlot(0).GetConnection() &&
1417 IsDynamicTensor(outputSlot.GetTensorInfo()))
Sadik Armagan813f2302020-05-19 14:10:30 +01001418 {
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001419 // IsTensorInfoSet will infer the dynamic output shape
Finn Williamsa4983ce2020-07-23 12:55:12 +01001420 outputSlot.IsTensorInfoSet();
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001421 // Once the shape is inferred we can validate it
Finn Williamsa4983ce2020-07-23 12:55:12 +01001422 validateFunc(outputSlot.GetTensorInfo(), isSupported);
1423
1424 if(!isSupported)
1425 {
1426 for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1427 {
1428 layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1429 }
1430
1431 return false;
1432 }
Sadik Armagan813f2302020-05-19 14:10:30 +01001433 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01001434
Finn Williamsa4983ce2020-07-23 12:55:12 +01001435 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1436 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1437
Mike Kellyb5fdf382019-06-11 16:35:25 +01001438 return true;
1439}
1440
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001441template<typename HalPolicy,
1442 typename HalOperation = typename HalPolicy::Operation,
1443 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001444armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1445 uint32_t inputIndex,
1446 const HalModel& model,
1447 ConversionData& data)
1448{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001449 using HalOperand = typename HalPolicy::Operand;
1450
1451 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001452 if (!operand)
1453 {
1454 return armnn::DataLayout::NHWC;
1455 }
1456
1457 if (!IsBool(*operand))
1458 {
1459 return armnn::DataLayout::NHWC;
1460 }
1461
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001462 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001463 if (!valueAddress)
1464 {
1465 return armnn::DataLayout::NHWC;
1466 }
1467
1468 if (*(static_cast<const bool*>(valueAddress)))
1469 {
1470 return armnn::DataLayout::NCHW;
1471 }
1472 else
1473 {
1474 return armnn::DataLayout::NHWC;
1475 }
1476}
1477
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001478template<typename HalPolicy,
1479 typename HalOperation = typename HalPolicy::Operation,
1480 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001481bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1482 uint32_t outputIndex,
1483 armnn::IConnectableLayer& layer,
1484 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001485 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001486 const armnn::TensorInfo* overrideOutputInfo = nullptr,
1487 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001488{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001489 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1490 outputIndex,
1491 layer,
1492 outputIndex,
1493 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001494 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001495 overrideOutputInfo,
1496 validateFunc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001497}
1498
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Shared conversion routine for all activation-style operations (ReLU, TanH, ...).
/// Validates backend support for the descriptor, adds an ArmNN activation layer,
/// connects input 0 and registers output 0. For dynamic output tensors, validation
/// is deferred to SetupAndTrackLayerOutputSlot via 'validateFunc'.
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;

    // Captures by reference; also reused after layer creation for dynamic shapes.
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    // A dynamic output can only be validated once its shape is inferred, so support
    // here depends solely on whether dynamic tensors are enabled at all.
    if(IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
1557
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001558template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001559 typename HalOperation = typename HalPolicy::Operation,
1560 typename HalModel = typename HalPolicy::Model>
1561bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1562{
1563 armnn::ActivationDescriptor desc;
1564 desc.m_Function = armnn::ActivationFunction::ReLu;
1565
1566 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1567}
1568
1569template<typename HalPolicy,
1570 typename HalOperation = typename HalPolicy::Operation,
1571 typename HalModel = typename HalPolicy::Model>
1572bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1573{
1574 armnn::ActivationDescriptor desc;
1575 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1576 desc.m_A = 1.0f;
1577 desc.m_B = -1.0f;
1578
1579 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1580}
1581
1582template<typename HalPolicy,
1583 typename HalOperation = typename HalPolicy::Operation,
1584 typename HalModel = typename HalPolicy::Model>
1585bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1586{
1587 armnn::ActivationDescriptor desc;
1588 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1589 desc.m_A = 6.0f;
1590
1591 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1592}
1593
1594template<typename HalPolicy,
1595 typename HalOperation = typename HalPolicy::Operation,
1596 typename HalModel = typename HalPolicy::Model>
1597bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1598{
1599 armnn::ActivationDescriptor desc;
1600 desc.m_Function = armnn::ActivationFunction::TanH;
1601 desc.m_A = 1.0f; // android nn does not support tanH parameters
1602 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1603
1604 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1605}
1606
1607template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001608 typename HalOperation = typename HalPolicy::Operation,
1609 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001610bool ConvertPaddings(const HalOperation& operation,
1611 const HalModel& model,
1612 ConversionData& data,
1613 unsigned int rank,
1614 armnn::PadDescriptor& padDescriptor)
1615{
1616 using HalOperand = typename HalPolicy::Operand;
1617
1618 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1619 if (!paddingsOperand)
1620 {
1621 return Fail("%s: Could not read paddings operand", __func__);
1622 }
1623
1624 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1625 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1626 {
1627 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1628 }
1629
1630 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001631 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1632 {
1633 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1634 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001635
1636 // add padding for each dimension of input tensor.
1637 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1638 {
1639 int paddingBeforeInput = paddings[i];
1640 int paddingAfterInput = paddings[i + 1];
1641
1642 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1643 {
1644 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1645 }
1646
1647 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1648 }
1649
1650 return true;
1651}
1652
1653template<typename HalPolicy,
1654 typename HalOperation = typename HalPolicy::Operation,
1655 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001656bool ConvertPooling2d(const HalOperation& operation,
1657 const char* operationName,
1658 armnn::PoolingAlgorithm poolType,
1659 const HalModel& model,
1660 ConversionData& data)
1661{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001662 using HalOperand = typename HalPolicy::Operand;
1663 using HalOperandType = typename HalPolicy::OperandType;
1664
1665 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001666 if (!input.IsValid())
1667 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001668 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001669 }
1670
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001671 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001672 if (!output)
1673 {
1674 return Fail("%s: Could not read output 0", __func__);
1675 }
1676
1677 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1678 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1679
arovir01b0717b52018-09-05 17:03:25 +01001680 armnn::Pooling2dDescriptor desc;
1681 desc.m_PoolType = poolType;
1682 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001683 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001684
1685 ActivationFn activation;
1686
Sadik Armagan15d63e22019-07-26 16:59:35 +01001687 auto inputSize = operation.inputs.size();
1688
1689 if (inputSize >= 10)
1690 {
1691 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1692 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1693 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1694 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1695 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1696 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1697 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1698 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1699 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1700 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1701 {
1702 return Fail("%s: Operation has invalid inputs", operationName);
1703 }
1704
Kevin May42477c12020-03-26 13:34:14 +00001705 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001706 {
1707 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1708 }
1709 }
1710 else
arovir01b0717b52018-09-05 17:03:25 +01001711 {
1712 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1713 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001714 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1715 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1716 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1717 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1718 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1719 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001720 {
1721 return Fail("%s: Operation has invalid inputs", operationName);
1722 }
1723
Kevin May42477c12020-03-26 13:34:14 +00001724 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001725 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001726 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001727 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001728
1729 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1730 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1731 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1732
1733 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1734 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001735 }
1736
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001737 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001738
1739 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1740 {
1741 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1742 IsPooling2dSupported,
1743 data.m_Backends,
1744 isSupported,
1745 inputInfo,
1746 outputInfo,
1747 desc);
1748
1749 };
1750
1751 if(IsDynamicTensor(outputInfo))
1752 {
1753 isSupported = AreDynamicTensorsSupported();
1754 }
1755 else
1756 {
1757 validateFunc(outputInfo, isSupported);
1758 }
1759
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001760 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001761 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001762 return false;
arovir01b0717b52018-09-05 17:03:25 +01001763 }
arovir01b0717b52018-09-05 17:03:25 +01001764
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001765 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1766 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001767 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001768 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001769 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001770
1771 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
1772 if (!endLayer)
arovir01b0717b52018-09-05 17:03:25 +01001773 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001774 return Fail("%s: ProcessActivation failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001775 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001776
1777 input.Connect(pooling2dLayer->GetInputSlot(0));
1778
Finn Williamsa4983ce2020-07-23 12:55:12 +01001779 if (!isSupported)
1780 {
1781 return false;
1782 }
1783
1784 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001785}
1786
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001787template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001788 typename HalOperation = typename HalPolicy::Operation,
1789 typename HalModel = typename HalPolicy::Model>
1790bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001791{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001792 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001793
1794 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1795 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1796
1797 if (!input0.IsValid() || !input1.IsValid())
1798 {
1799 return Fail("%s: Operation has invalid inputs", __func__);
1800 }
1801
1802 // The FuseActivation parameter is always the input index 2
1803 // and it should be optional
1804 ActivationFn activationFunction;
1805 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1806 {
1807 return Fail("%s: Operation has invalid inputs", __func__);
1808 }
1809
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001810 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001811 if (!outputOperand)
1812 {
1813 return false;
1814 }
1815
1816 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1817 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1818
1819 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001820
1821 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001822 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1823 {
1824 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1825 IsAdditionSupported,
1826 data.m_Backends,
1827 isSupported,
1828 inputInfo0,
1829 inputInfo1,
1830 outputInfo);
1831 };
1832
1833 if(!IsDynamicTensor(outputInfo))
1834 {
1835 validateFunc(outputInfo, isSupported);
1836 }
1837 else
1838 {
1839 isSupported = AreDynamicTensorsSupported();
1840 }
1841
Mike Kelly46272802019-08-14 17:00:48 +01001842 if (!isSupported)
1843 {
1844 return false;
1845 }
1846
1847 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1848 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1849
1850 if (endLayer != nullptr)
1851 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001852 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001853 if (!isReshapeSupported)
1854 {
1855 return false;
1856 }
1857
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001858 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01001859 }
1860 else
1861 {
1862 return Fail("%s: ProcessActivation failed", __func__);
1863 }
1864}
1865
1866template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001867 typename HalOperation = typename HalPolicy::Operation,
1868 typename HalModel = typename HalPolicy::Model>
1869bool ConvertArgMinMax(const HalOperation& operation,
1870 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001871 ConversionData& data,
1872 armnn::ArgMinMaxFunction argMinMaxFunction)
1873{
1874 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1875
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001876 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001877 using HalOperandType = typename HalPolicy::OperandType;
1878
1879 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1880
1881 if (!input0.IsValid())
1882 {
1883 return Fail("%s: Operation has invalid inputs", __func__);
1884 }
1885
1886 int32_t axis;
1887 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1888 {
1889 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1890 }
1891
1892 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1893 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1894
1895 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1896 {
1897 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1898 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1899 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1900 return Fail("%s: Axis must be in range [-n, n)", __func__);
1901 }
1902
1903 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1904 if (!output)
1905 {
1906 return Fail("%s: Could not read output 0", __func__);
1907 }
1908
1909 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1910
1911 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001912
1913 armnn::ArgMinMaxDescriptor descriptor;
1914 descriptor.m_Function = argMinMaxFunction;
1915 descriptor.m_Axis = axis;
1916
1917 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001918
1919 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1920 {
1921 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1922 IsArgMinMaxSupported,
1923 data.m_Backends,
1924 isSupported,
1925 inputInfo0,
1926 outputInfo,
1927 descriptor);
1928 };
1929
1930 if(IsDynamicTensor(outputInfo))
1931 {
1932 isSupported = AreDynamicTensorsSupported();
1933 }
1934 else
1935 {
1936 validateFunc(outputInfo, isSupported);
1937 }
1938
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001939 if (!isSupported)
1940 {
1941 return false;
1942 }
1943
1944 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1945 assert(layer != nullptr);
1946
1947 input0.Connect(layer->GetInputSlot(0));
1948
Finn Williamsa4983ce2020-07-23 12:55:12 +01001949 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001950}
1951
1952template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001953 typename HalOperation = typename HalPolicy::Operation,
1954 typename HalModel = typename HalPolicy::Model>
1955bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001956{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001957 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001958 using HalOperandType = typename HalPolicy::OperandType;
1959
1960 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1961 if (operation.inputs.size() <= 1)
1962 {
1963 return Fail("%s: Operation has insufficient arguments", __func__);
1964 }
1965
1966 // Get inputs and outputs
1967 const std::size_t numInputTensors = operation.inputs.size() - 1;
1968
1969 int32_t concatDim;
1970 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1971 {
1972 return Fail("%s: Operation has invalid inputs", __func__);
1973 }
1974
1975 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1976 if (!outputOperand)
1977 {
1978 return Fail("%s: Operation has no outputs", __func__);
1979 }
1980
Mike Kellyb8805202019-07-31 17:25:43 +01001981 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
1982 armnn::TensorShape outputShape = outputInfo.GetShape();
1983
1984 //
1985 // handle negative concat dims along the lines of tensorflow as described here:
1986 // https://www.tensorflow.org/api_docs/python/tf/concat
1987 // "negative axis refers to axis + rank(values)-th dimension"
1988 //
1989 if (concatDim < 0)
1990 {
1991 concatDim += outputShape.GetNumDimensions();
1992 }
1993
1994 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
1995 {
1996 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
1997 }
1998
1999 std::vector<LayerInputHandle> inputHandles;
2000 std::vector<armnn::TensorShape> inputShapes;
2001
2002 inputHandles.reserve(numInputTensors);
2003 inputShapes.reserve(numInputTensors);
2004
2005 bool inputsHaveBeenReshaped = false;
2006 unsigned int tensorDimensionsAdded = 0;
2007
2008 for (uint32_t i = 0; i < numInputTensors; ++i)
2009 {
2010 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2011 if (!operand)
2012 {
2013 return Fail("%s: Operation has invalid inputs", __func__);
2014 }
2015
Teresa Charlin3b959602019-10-31 17:05:47 +00002016 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2017 if (!operandInputHandle.IsValid())
2018 {
2019 return Fail("%s: Operation has invalid inputs", __func__);
2020 }
Mike Kellyb8805202019-07-31 17:25:43 +01002021
Teresa Charlin3b959602019-10-31 17:05:47 +00002022 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002023 if (operandShape.GetNumDimensions() == 0)
2024 {
2025 return Fail("%s: Operands with rank 0 are not supported", __func__);
2026 }
2027
2028 if (RequiresReshape(operandShape))
2029 {
2030 inputsHaveBeenReshaped = true;
2031
2032 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2033
2034 // Expand the tensor to three dimensions
2035 if (operandShape.GetNumDimensions() == 2)
2036 {
2037 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2038 tensorDimensionsAdded = 1;
2039 }
2040 else
2041 {
2042 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2043 tensorDimensionsAdded = 2;
2044 }
2045
Kevin Mayaed08ac2019-12-12 16:33:31 +00002046 armnn::ReshapeDescriptor reshapeDescriptor;
2047 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2048
2049 bool isSupported = false;
2050 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2051 IsReshapeSupported,
2052 data.m_Backends,
2053 isSupported,
2054 operandInputHandle.GetTensorInfo(),
2055 reshapeInfo,
2056 reshapeDescriptor);
2057 if (!isSupported)
2058 {
2059 return false;
2060 }
2061
Mike Kellyb8805202019-07-31 17:25:43 +01002062 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
2063 *data.m_Network,
2064 operandInputHandle,
2065 reshapeInfo
2066 );
2067
2068 // Point to the reshape operation rather then the input operation
2069 operandShape = reshapeInfo.GetShape();
2070 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2071 }
2072
2073 inputShapes.emplace_back(operandShape);
2074 inputHandles.emplace_back(operandInputHandle);
2075
2076 if (!inputHandles.back().IsValid())
2077 {
2078 return Fail("%s: Operation has invalid inputs", __func__);
2079 }
2080 }
2081
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002082 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
Mike Kellyb8805202019-07-31 17:25:43 +01002083
2084 if (inputsHaveBeenReshaped)
2085 {
2086 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2087 concatDim += tensorDimensionsAdded;
2088
2089 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2090 if (tensorDimensionsAdded == 1)
2091 {
2092 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2093 }
2094 else if (tensorDimensionsAdded == 2)
2095 {
2096 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2097 }
2098 }
2099
2100 // Check if permutations is required and get the pair of permutations required for the concatenation.
2101 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2102 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
2103 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
2104
2105 bool needPermute =
2106 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
2107
2108 if (needPermute)
2109 {
Mike Kelly4a956582020-02-28 10:32:09 +00002110 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
Mike Kellyb8805202019-07-31 17:25:43 +01002111 }
2112
2113 outputInfo.SetShape(outputShape);
2114
2115 // this is no-op for identity swizzles, otherwise it replaces both
2116 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002117 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002118 {
2119 return false;
2120 }
Mike Kellyb8805202019-07-31 17:25:43 +01002121
2122 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2123 armnn::OriginsDescriptor concatDescriptor;
2124
2125 try
2126 {
2127 // The concat descriptor is always created across the only supported concat dimension
2128 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2129 concatDescriptor =
2130 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
2131 }
Derek Lambertib9cb8442019-11-28 13:34:48 +00002132 catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002133 {
2134 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2135 }
2136
2137 // Validate the output shape is correct given the input shapes based on the
2138 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2139 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2140 {
2141 return Fail("%s: Error validating the output shape for concat", __func__);
2142 }
2143
2144 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2145 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
2146 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
2147
2148 bool isSupported = false;
2149 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2150 IsConcatSupported,
2151 data.m_Backends,
2152 isSupported,
2153 inputTensorInfos,
2154 outputInfo,
2155 concatDescriptor);
2156 if (!isSupported)
2157 {
2158 return false;
2159 }
2160
2161 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2162 assert(layer != nullptr);
2163 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2164
2165 // Connect inputs to the layer
2166 const int numInputSlots = layer->GetNumInputSlots();
2167 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2168 for (int i = 0; i < numInputSlots; ++i)
2169 {
2170 // connect the input directly to the merge (concat) layer
2171 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2172 }
2173
2174 if (needPermute)
2175 {
Mike Kelly4a956582020-02-28 10:32:09 +00002176 armnn::TransposeDescriptor transposeDesc;
2177 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002178 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2179 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2180 permutationPair.second);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002181
2182 bool isSupported = false;
2183 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002184 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002185 data.m_Backends,
2186 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002187 inputTransposeInfo,
2188 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002189 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002190 if (!isSupported)
2191 {
2192 return false;
2193 }
Mike Kellyb8805202019-07-31 17:25:43 +01002194 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Mike Kelly4a956582020-02-28 10:32:09 +00002195 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
2196 layer->GetOutputSlot(0),
2197 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002198 layer = &deswizzleLayer;
2199 }
2200
2201 if (inputsHaveBeenReshaped)
2202 {
2203 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2204
2205 // Undo the reshape knowing the amount of dimensions added
2206 if (tensorDimensionsAdded == 1)
2207 {
2208 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
2209 afterConcatInfo.GetShape()[2] }));
2210 }
2211 else if (tensorDimensionsAdded == 2)
2212 {
2213 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
2214 }
2215
Kevin Mayaed08ac2019-12-12 16:33:31 +00002216 armnn::ReshapeDescriptor reshapeDescriptor;
2217 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
2218
2219 bool isSupported = false;
2220 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2221 IsReshapeSupported,
2222 data.m_Backends,
2223 isSupported,
2224 layer->GetOutputSlot(0).GetTensorInfo(),
2225 afterConcatInfo,
2226 reshapeDescriptor);
2227 if (!isSupported)
2228 {
2229 return false;
2230 }
2231
Mike Kellyb8805202019-07-31 17:25:43 +01002232 layer = &AddReshapeLayer(
2233 *data.m_Network,
2234 layer->GetOutputSlot(0),
2235 afterConcatInfo
2236 );
2237 }
2238
2239 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2240}
2241
2242template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002243 typename HalOperation = typename HalPolicy::Operation,
2244 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002245bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2246{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002247 using HalOperand = typename HalPolicy::Operand;
2248 using HalOperandType = typename HalPolicy::OperandType;
2249
2250 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002251 if (!input.IsValid())
2252 {
2253 return Fail("%s: Operation has invalid inputs", __func__);
2254 }
2255
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002256 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002257 if (!output)
2258 {
2259 return Fail("%s: Could not read output 0", __func__);
2260 }
2261
2262 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002263 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002264
2265 // ArmNN does not currently support non-fixed weights or bias
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002266 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
2267 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002268
2269 if (!weightsPin.IsValid() || !biasPin.IsValid())
2270 {
2271 return Fail("%s: Operation has invalid inputs", __func__);
2272 }
2273
2274 armnn::ConstTensor weights = weightsPin.GetConstTensor();
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002275 armnn::ConstTensor bias = biasPin.GetConstTensor();
Mike Kellyb5fdf382019-06-11 16:35:25 +01002276 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2277
2278 armnn::Convolution2dDescriptor desc;
2279 desc.m_DataLayout = armnn::DataLayout::NHWC;
2280 ActivationFn activation;
2281
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002282 if (operation.inputs.size() == 10)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002283 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002284 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2285 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2286 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2287 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2288 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2289 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002290 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002291 {
2292 return Fail("%s: Operation has invalid inputs", __func__);
2293 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01002294 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002295 else if (operation.inputs.size() == 7)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002296 {
2297 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002298 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2299 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2300 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002301 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002302 {
2303 return Fail("%s: Operation has invalid inputs", __func__);
2304 }
2305
2306 const uint32_t kernelX = weights.GetShape()[2];
2307 const uint32_t kernelY = weights.GetShape()[1];
2308 const uint32_t inputX = inputInfo.GetShape()[2];
2309 const uint32_t inputY = inputInfo.GetShape()[1];
2310
2311 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2312 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002313 }
2314 else
2315 {
2316 return Fail("%s: Unsupported number of operation inputs", __func__);
2317 }
2318
2319 desc.m_BiasEnabled = true;
2320 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2321
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002322 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002323 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2324 {
2325 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2326 IsConvolution2dSupported,
2327 data.m_Backends,
2328 isSupported,
2329 inputInfo,
2330 outputInfo,
2331 desc,
2332 weights.GetInfo(),
2333 biases);
2334 };
2335
2336 if(!IsDynamicTensor(outputInfo))
2337 {
2338 validateFunc(outputInfo, isSupported);
2339 }
2340 else
2341 {
2342 isSupported = AreDynamicTensorsSupported();
2343 }
2344
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002345 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002346 {
2347 return false;
2348 }
2349
2350 armnn::IConnectableLayer* startLayer =
2351 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2352
2353 if (!startLayer)
2354 {
2355 return Fail("%s: AddConvolution2dLayer failed", __func__);
2356 }
2357
2358 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
2359
2360 if (!endLayer)
2361 {
2362 return Fail("%s: ProcessActivation failed", __func__);
2363 }
2364
2365 input.Connect(startLayer->GetInputSlot(0));
2366
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002367 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002368}
2369
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002370template<typename HalPolicy,
2371 typename HalOperation = typename HalPolicy::Operation,
2372 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002373bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2374{
2375 using HalOperand = typename HalPolicy::Operand;
2376 using HalOperandType = typename HalPolicy::OperandType;
2377
2378 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2379 if (!input.IsValid() )
2380 {
2381 return Fail("%s: Operation has invalid inputs", __func__);
2382 }
2383
2384 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2385 unsigned int rank = inputInfo.GetNumDimensions();
2386 if (rank != 4)
2387 {
2388 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2389 }
2390
2391 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2392 if (!output)
2393 {
2394 return Fail("%s: Could not read output 0", __func__);
2395 }
2396
2397 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002398
2399 armnn::DepthToSpaceDescriptor descriptor;
2400
2401 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2402 if (descriptor.m_BlockSize <= 1)
2403 {
2404 return Fail("%s: Block size must be at least 1 in all dimensions");
2405 }
2406
2407 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002408 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002409 {
2410 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2411 }
2412
2413 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002414 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2415 {
2416 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2417 IsDepthToSpaceSupported,
2418 data.m_Backends,
2419 isSupported,
2420 inputInfo,
2421 outputInfo,
2422 descriptor);
2423 };
2424
2425 if(!IsDynamicTensor(outputInfo))
2426 {
2427 validateFunc(outputInfo, isSupported);
2428 }
2429 else
2430 {
2431 isSupported = AreDynamicTensorsSupported();
2432 }
2433
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002434 if (!isSupported)
2435 {
2436 return false;
2437 }
2438
2439 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2440 assert(layer != nullptr);
2441 input.Connect(layer->GetInputSlot(0));
2442
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002443 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002444}
2445
/// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer added to data.m_Network.
///
/// Handles both HAL signatures: 11 inputs (explicit padding) and 8 inputs
/// (implicit padding scheme). Weights and bias must be constant operands.
/// Returns false (via Fail) on malformed inputs or unsupported configurations.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the depth multiplier, is recovered as out_channels / in_channels.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with weights * input scales if they drifted.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // 11 inputs: explicit padding (left/right/top/bottom), strides, fused activation.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8 inputs: implicit padding scheme, strides, fused activation.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel dims come from the swizzled [ M, I, H, W ] weights; input dims from NHWC.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    // Dynamic outputs defer the support check until the shape is inferred;
    // validateFunc is forwarded to SetupAndTrackLayerOutputSlot for that purpose.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }


    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
}
2599
Mike Kelly3c673942019-07-25 09:26:06 +01002600template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002601 typename HalOperation = typename HalPolicy::Operation,
2602 typename HalModel = typename HalPolicy::Model>
2603bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002604{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002605 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002606
2607 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2608 if (!input.IsValid())
2609 {
2610 return Fail("%s: Operation has invalid input", __func__);
2611 }
2612
Sadik Armagan98c0f662019-11-21 15:54:36 +00002613 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2614 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2615 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2616 {
2617 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2618 }
2619
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002620 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002621 if (!outputOperand)
2622 {
2623 return Fail("%s: Operation has invalid outputs", __func__);
2624 }
2625
2626 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002627
2628 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002629 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2630 {
2631 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2632 IsDequantizeSupported,
2633 data.m_Backends,
2634 isSupported,
2635 inputInfo,
2636 outputInfo);
2637 };
2638
2639 if(IsDynamicTensor(outputInfo))
2640 {
2641 isSupported = AreDynamicTensorsSupported();
2642 }
2643 else
2644 {
2645 validateFunc(outputInfo, isSupported);
2646 }
2647
Mike Kelly46272802019-08-14 17:00:48 +01002648 if (!isSupported)
2649 {
2650 return false;
2651 }
2652
2653 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2654 assert(layer != nullptr);
2655 input.Connect(layer->GetInputSlot(0));
2656
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002657 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002658}
2659
2660template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002661 typename HalOperation = typename HalPolicy::Operation,
2662 typename HalModel = typename HalPolicy::Model>
2663bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002664{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002665 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002666
2667 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2668 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2669
2670 if (!input0.IsValid() || !input1.IsValid())
2671 {
2672 return Fail("%s: Operation has invalid inputs", __func__);
2673 }
2674
2675 // The FuseActivation parameter is always the input index 2
2676 // and it should be optional
2677 ActivationFn activationFunction;
2678 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2679 {
2680 return Fail("%s: Operation has invalid inputs", __func__);
2681 }
2682
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002683 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002684 if (!output)
2685 {
2686 return Fail("%s: Could not read output 0", __func__);
2687 }
2688
2689 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002690
2691 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002692 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2693 {
2694 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2695 IsDivisionSupported,
2696 data.m_Backends,
2697 isSupported,
2698 input0.GetTensorInfo(),
2699 input1.GetTensorInfo(),
2700 outputInfo);
2701 };
2702
2703 if(!IsDynamicTensor(outputInfo))
2704 {
2705 validateFunc(outputInfo, isSupported);
2706 }
2707 else
2708 {
2709 isSupported = AreDynamicTensorsSupported();
2710 }
2711
Mike Kelly46272802019-08-14 17:00:48 +01002712 if (!isSupported)
2713 {
2714 return false;
2715 }
2716
2717 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2718 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2719
2720 if (endLayer)
2721 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002722 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002723 if (!isReshapeSupported)
2724 {
2725 return false;
2726 }
2727
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002728 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002729 }
2730 return Fail("%s: ProcessActivation failed", __func__);
2731}
2732
2733template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002734 typename HalOperation = typename HalPolicy::Operation,
2735 typename HalModel = typename HalPolicy::Model>
2736bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002737{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002738 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002739
2740 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2741 if (!input.IsValid())
2742 {
2743 return Fail("%s: Operation has invalid inputs", __func__);
2744 }
2745
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002746 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002747 if (!outputOperand)
2748 {
2749 return Fail("%s: Operation has invalid outputs", __func__);
2750 }
2751
2752 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002753
2754 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002755 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2756 {
2757 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2758 IsFloorSupported,
2759 data.m_Backends,
2760 isSupported,
2761 input.GetTensorInfo(),
2762 outputInfo);
2763 };
2764
2765 if(!IsDynamicTensor(outputInfo))
2766 {
2767 validateFunc(outputInfo, isSupported);
2768 }
2769 else
2770 {
2771 isSupported = AreDynamicTensorsSupported();
2772 }
2773
Mike Kelly46272802019-08-14 17:00:48 +01002774 if (!isSupported)
2775 {
2776 return false;
2777 }
2778
2779 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2780 assert(layer != nullptr);
2781 input.Connect(layer->GetInputSlot(0));
2782
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002783 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002784}
2785
// Overload set used to detect QSYMM8 (symmetric quantized 8-bit) weight operands.
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so this overload is always false.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2790
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True when a HAL 1.2 operand holds QSYMM8 (symmetric quantized 8-bit) data.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2799
#ifdef ARMNN_ANDROID_NN_V1_3

// True when a HAL 1.3 operand holds QSYMM8 (symmetric quantized 8-bit) data.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2808
// Outcome of DequantizeIfRequired (see below).
enum class DequantizeStatus
{
    SUCCESS,         // Weights were fed by a DEQUANTIZE op and have been dequantized here.
    NOT_REQUIRED,    // Weights are already a constant operand; no dequantization needed.
    INVALID_OPERAND  // The weights operand could not be read.
};

// (dequantized buffer, buffer size in bytes, tensor info for the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2817
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002818template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002819 typename HalOperation = typename HalPolicy::Operation,
2820 typename HalModel = typename HalPolicy::Model>
2821DequantizeResult DequantizeIfRequired(size_t operand_index,
2822 const HalOperation& operation,
2823 const HalModel& model,
2824 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002825{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002826 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002827
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002828 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002829 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002830 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002831 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002832 }
2833
2834 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2835 {
2836 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002837 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002838 }
2839
2840 const size_t weightsInputIndex = operation.inputs[operand_index];
2841
2842 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2843 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002844 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002845 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002846 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002847 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002848 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2849 {
2850 continue;
2851 }
2852
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002853 size_t outOpIndex = weightsInputIndex + 1;
2854 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002855 {
2856 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002857 }
2858
2859 if (outOpIndex != weightsInputIndex)
2860 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002861 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002862 }
2863
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002864 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002865 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002866
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002867 if (!IsQSymm8(*operand))
2868 {
2869 // Only supporting dequantize from QSYMM8 to FLOAT
2870 break;
2871 }
2872
2873 // Allocate a new buffer for the dequantized data and manually dequantize
2874 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2875 if (!startValue)
2876 {
2877 // Failed to get the operand address
2878 break;
2879 }
2880
2881 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2882 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002883 const float quantizationScale = operand->scale;
2884
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002885 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2886 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2887 {
2888 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002889 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002890 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2891 }
2892
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002893 // Construct tensor info for dequantized ConstTensor
2894 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2895 operand->dimensions.data(),
2896 armnn::DataType::Float32);
2897
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002898 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2899 std::move(tensorInfo),
2900 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002901 }
2902
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002903 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002904}
2905
2906template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002907 typename HalOperation = typename HalPolicy::Operation,
2908 typename HalModel = typename HalPolicy::Model>
2909ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2910 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002911 const ConversionData& data,
2912 size_t operandIndex,
2913 bool optional = false)
2914{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002915 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2916
2917 DequantizeStatus status = std::get<3>(dequantized);
2918 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002919 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002920 case DequantizeStatus::INVALID_OPERAND:
2921 {
2922 // return invalid const tensor pin
2923 return ConstTensorPin();
2924 }
2925 case DequantizeStatus::NOT_REQUIRED:
2926 {
2927 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2928 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2929 }
2930 case DequantizeStatus::SUCCESS:
2931 default:
2932 {
2933 return ConstTensorPin(
2934 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2935 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002936 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002937}
2938
2939
/// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected
/// layer added to data.m_Network.
///
/// Inputs: 0 = input tensor (flattened to 2D if rank > 2 via an inserted Reshape
/// layer), 1 = weights (dequantized first if fed by a DEQUANTIZE op),
/// 2 = 1D bias, 3 = fused activation. Returns false (via Fail) on bad inputs
/// or unsupported configurations.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Weights may come from a DEQUANTIZE op; dequantize them to constants if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2D shape the input must be flattened to for FC; throws if the
    // input cannot be flattened consistently with the weights.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Check input/weights/output shapes agree before asking the backend.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weights.GetInfo(),
                                   bias.GetInfo(),
                                   desc);
    };

    // Dynamic outputs defer the support check until the shape is inferred;
    // validateFunc is forwarded to SetupAndTrackLayerOutputSlot for that purpose.
    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    // Append the fused activation (if any) after the FC layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Rank > 2: insert a Reshape to flatten the input to the 2D shape
            // computed above before feeding the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
3066
3067template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003068 typename HalOperation = typename HalPolicy::Operation,
3069 typename HalModel = typename HalPolicy::Model>
3070bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003071{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003072 using HalOperand = typename HalPolicy::Operand;
3073
Mike Kelly999e2092019-08-15 10:46:46 +01003074 if (operation.inputs.size() != 1)
3075 {
3076 return Fail("%s: Optional inputs are not supported", __func__);
3077 }
3078
Mike Kelly46272802019-08-14 17:00:48 +01003079 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3080 if (!input.IsValid())
3081 {
3082 return Fail("%s: Operation has invalid inputs", __func__);
3083 }
3084
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003085 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003086 if (!output)
3087 {
3088 return Fail("%s: Could not read output 0", __func__);
3089 }
3090
3091 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3092 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3093
Mike Kelly46272802019-08-14 17:00:48 +01003094 if (outputInfo.GetNumDimensions() != 4u)
3095 {
3096 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3097 }
3098
3099 armnn::L2NormalizationDescriptor desc;
3100 desc.m_DataLayout = armnn::DataLayout::NHWC;
3101
3102 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003103 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3104 {
3105 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3106 IsL2NormalizationSupported,
3107 data.m_Backends,
3108 isSupported,
3109 inputInfo,
3110 outputInfo,
3111 desc);
3112 };
3113
3114 if(!IsDynamicTensor(outputInfo))
3115 {
3116 validateFunc(outputInfo, isSupported);
3117 }
3118 else
3119 {
3120 isSupported = AreDynamicTensorsSupported();
3121 }
3122
Mike Kelly46272802019-08-14 17:00:48 +01003123 if (!isSupported)
3124 {
3125 return false;
3126 }
3127
3128 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3129 assert(layer != nullptr);
3130 input.Connect(layer->GetInputSlot(0));
3131
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003132 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003133}
3134
3135template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003136 typename HalOperation = typename HalPolicy::Operation,
3137 typename HalModel = typename HalPolicy::Model>
3138bool ConvertLocalResponseNormalization(const HalOperation& operation,
3139 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003140 ConversionData& data)
3141{
Mike Kelly999e2092019-08-15 10:46:46 +01003142 if (operation.inputs.size() != 5)
3143 {
3144 return Fail("%s: Optional inputs are not supported", __func__);
3145 }
3146
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003147 using HalOperand = typename HalPolicy::Operand;
3148 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003149
3150 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3151 if (!input.IsValid())
3152 {
3153 return Fail("%s: Operation has invalid inputs", __func__);
3154 }
3155
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003156 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003157 if (!output)
3158 {
3159 return Fail("%s: Could not read output 0", __func__);
3160 }
3161
3162 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3163 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3164
Mike Kelly46272802019-08-14 17:00:48 +01003165 if (outputInfo.GetNumDimensions() != 4u)
3166 {
3167 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3168 }
3169
3170 armnn::NormalizationDescriptor descriptor;
3171 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3172 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3173 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3174
3175 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003176 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003177 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3178 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3179 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3180 {
3181 return Fail("%s: Operation has invalid inputs", __func__);
3182 }
3183
3184 // ArmNN expects normSize to be the full size of the normalization
3185 // window rather than the radius as in AndroidNN.
3186 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3187
3188 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003189 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3190 {
3191 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3192 IsNormalizationSupported,
3193 data.m_Backends,
3194 isSupported,
3195 inputInfo,
3196 outputInfo,
3197 descriptor);
3198 };
3199
3200 if(!IsDynamicTensor(outputInfo))
3201 {
3202 validateFunc(outputInfo, isSupported);
3203 }
3204 else
3205 {
3206 isSupported = AreDynamicTensorsSupported();
3207 }
3208
Mike Kelly46272802019-08-14 17:00:48 +01003209 if (!isSupported)
3210 {
3211 return false;
3212 }
3213
3214
3215 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3216 assert(layer != nullptr);
3217 input.Connect(layer->GetInputSlot(0));
3218
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003219 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003220}
3221
3222template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003223 typename HalOperation = typename HalPolicy::Operation,
3224 typename HalModel = typename HalPolicy::Model>
3225bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003226{
Mike Kelly46272802019-08-14 17:00:48 +01003227 armnn::ActivationDescriptor desc;
3228 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3229
3230 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3231}
3232
/// Converts an Android NN MEAN operation into an ArmNN Mean layer.
/// Inputs: 0 = tensor to reduce, 1 = 1D int32 axis tensor, 2 = int32 "keep dims" flag.
/// Returns false (via Fail) if any operand is missing/invalid or the backend rejects the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Input 1 holds the reduction axes.
    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axes such as -1 to their positive equivalent;
    // the set discards repeated axes. NOTE(review): axes are presumably within
    // [-rank, rank) per the NNAPI spec — values outside that range are not rejected here.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    // Backend support check; also passed to SetupAndTrackLayerOutputSlot so dynamic
    // outputs can be validated once their shape is inferred.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsMeanSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        // Shape only known at execution time; defer validation.
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
}
3318
3319template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003320 typename HalOperation = typename HalPolicy::Operation,
3321 typename HalModel = typename HalPolicy::Model>
3322bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003323{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003324 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003325
3326 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3327 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3328
3329 if (!input0.IsValid() || !input1.IsValid())
3330 {
3331 return Fail("%s: Operation has invalid inputs", __func__);
3332 }
3333
3334 // The FuseActivation parameter is always the input index 2
3335 // and it should be optional
3336 ActivationFn activationFunction;
3337 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3338 {
3339 return Fail("%s: Operation has invalid inputs", __func__);
3340 }
3341
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003342 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003343
3344 if (outputOperand == nullptr)
3345 {
3346 return false;
3347 }
3348
3349 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003350
3351 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003352 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3353 {
3354 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3355 IsMultiplicationSupported,
3356 data.m_Backends,
3357 isSupported,
3358 input0.GetTensorInfo(),
3359 input1.GetTensorInfo(),
3360 outputInfo);
3361 };
3362
3363 if(!IsDynamicTensor(outputInfo))
3364 {
3365 validateFunc(outputInfo, isSupported);
3366 }
3367 else
3368 {
3369 isSupported = AreDynamicTensorsSupported();
3370 }
3371
Mike Kelly46272802019-08-14 17:00:48 +01003372 if (!isSupported)
3373 {
3374 return false;
3375 }
3376
3377 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3378 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3379
3380 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3381 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3382
3383 if (endLayer != nullptr)
3384 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003385 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003386 if (!isReshapeSupported)
3387 {
3388 return false;
3389 }
3390
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003391 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003392 }
3393 else
3394 {
3395 return Fail("%s: ProcessActivation failed", __func__);
3396 }
3397}
3398
3399template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003400 typename HalOperation = typename HalPolicy::Operation,
3401 typename HalModel = typename HalPolicy::Model>
3402bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003403{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003404 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003405
Mike Kelly3c673942019-07-25 09:26:06 +01003406 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3407 if (!input.IsValid())
3408 {
3409 return Fail("%s: Operation has invalid inputs", __func__);
3410 }
3411
3412 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3413 unsigned int rank = inputInfo.GetNumDimensions();
3414
3415 armnn::PadDescriptor descriptor;
3416 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3417 {
3418 return Fail("%s: Could not convert paddings", __func__);
3419 }
3420
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003421 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3422 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003423 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3424 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3425 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003426 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003427 {
3428 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3429 }
3430
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003431 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003432 if (!output)
3433 {
3434 return Fail("%s: Could not read output", __func__);
3435 }
3436
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003437 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003438
3439 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003440 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3441 {
3442 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3443 IsPadSupported,
3444 data.m_Backends,
3445 isSupported,
3446 inputInfo,
3447 outputInfo,
3448 descriptor);
3449 };
3450
3451 if(!IsDynamicTensor(outputInfo))
3452 {
3453 validateFunc(outputInfo, isSupported);
3454 }
3455 else
3456 {
3457 isSupported = AreDynamicTensorsSupported();
3458 }
3459
Mike Kelly3c673942019-07-25 09:26:06 +01003460 if (!isSupported)
3461 {
3462 return false;
3463 }
3464
3465 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3466 assert(layer != nullptr);
3467 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003468
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003469 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003470}
3471
Mike Kelly0a879362019-07-29 16:56:31 +01003472template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003473 typename HalOperation = typename HalPolicy::Operation,
3474 typename HalModel = typename HalPolicy::Model>
3475bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003476{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003477 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003478
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003479 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3480 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3481 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003482
3483 if (inputOperand == nullptr
3484 || requestedShapeOperand == nullptr
3485 || outputOperand == nullptr)
3486 {
3487 return Fail("%s: Operation has invalid inputs", __func__);
3488 }
3489
3490 if (requestedShapeOperand->dimensions.size() != 1)
3491 {
3492 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3493 __func__, requestedShapeOperand->dimensions.size());
3494 }
3495
3496 std::vector<int32_t> targetDimensions;
3497 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3498 {
3499 return Fail("%s: Could not read values of input 1", __func__);
3500 }
3501
3502 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3503
3504 Shape requestedShape;
3505 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3506 // function that resolves these values into a fully specified tensor shape.
3507 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3508 {
3509 return Fail("%s: Failed to resolve the requested shape", __func__);
3510 }
3511
Mike Kelly46272802019-08-14 17:00:48 +01003512 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3513 if (!input.IsValid())
3514 {
3515 return Fail("%s: Could not read input 0", __func__);
3516 }
3517
3518 armnn::ReshapeDescriptor reshapeDescriptor;
3519 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3520 requestedShape.dimensions.data());
3521
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003522 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3523
Mike Kelly46272802019-08-14 17:00:48 +01003524 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003525 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3526 {
3527 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3528 IsReshapeSupported,
3529 data.m_Backends,
3530 isSupported,
3531 input.GetTensorInfo(),
3532 outputInfo,
3533 reshapeDescriptor);
3534 };
3535
3536 if(!IsDynamicTensor(outputInfo))
3537 {
3538 validateFunc(outputInfo, isSupported);
3539 }
3540 else
3541 {
3542 isSupported = AreDynamicTensorsSupported();
3543 }
3544
Mike Kelly46272802019-08-14 17:00:48 +01003545 if (!isSupported)
3546 {
3547 return false;
3548 }
3549
3550 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3551 assert(layer != nullptr);
3552 input.Connect(layer->GetInputSlot(0));
3553
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003554 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003555}
3556
3557template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003558 typename HalOperation = typename HalPolicy::Operation,
3559 typename HalModel = typename HalPolicy::Model>
3560bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003561{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003562 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003563
Mike Kelly0a879362019-07-29 16:56:31 +01003564 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3565 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3566
3567 if (!input0.IsValid() || !input1.IsValid())
3568 {
3569 return Fail("%s: Operation has invalid inputs", __func__);
3570 }
3571
3572 // The FuseActivation parameter is always the input index 2
3573 // and it should be optional
3574 ActivationFn activationFunction;
3575 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3576 {
3577 return Fail("%s: Operation has invalid inputs", __func__);
3578 }
3579
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003580 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003581 if (!output)
3582 {
3583 return Fail("%s: Could not read output 0", __func__);
3584 }
3585
3586 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003587
3588 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003589 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3590 {
3591 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3592 IsSubtractionSupported,
3593 data.m_Backends,
3594 isSupported,
3595 input0.GetTensorInfo(),
3596 input1.GetTensorInfo(),
3597 outputInfo);
3598 };
3599
3600 if(IsDynamicTensor(outputInfo))
3601 {
3602 isSupported = AreDynamicTensorsSupported();
3603 }
3604 else
3605 {
3606 validateFunc(outputInfo, isSupported);
3607 }
3608
Mike Kelly0a879362019-07-29 16:56:31 +01003609 if (!isSupported)
3610 {
3611 return false;
3612 }
3613
3614 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3615 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3616
3617 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3618 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3619
3620 if (endLayer)
3621 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003622 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003623 if (!isReshapeSupported)
3624 {
3625 return false;
3626 }
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003627 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
Mike Kelly0a879362019-07-29 16:56:31 +01003628 }
3629
3630 return Fail("%s: ProcessActivation failed", __func__);
3631}
3632
Finn Williams23b87b32019-07-30 11:44:05 +01003633template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003634 typename HalOperation = typename HalPolicy::Operation,
3635 typename HalModel = typename HalPolicy::Model>
3636bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003637{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003638 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003639
3640 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3641 if (!input.IsValid())
3642 {
3643 return Fail("%s: Operation has invalid inputs", __func__);
3644 }
3645
3646 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3647 unsigned int rank = inputInfo.GetNumDimensions();
3648 if (rank > 4)
3649 {
3650 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3651 }
3652
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003653 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003654 if (!output)
3655 {
3656 return Fail("%s: Could not read output 0", __func__);
3657 }
Mike Kelly46272802019-08-14 17:00:48 +01003658 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3659 {
3660 return Fail("%s: Dynamic output tensors are not supported", __func__);
3661 }
3662
3663 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3664 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003665 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003666
3667 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3668
3669 std::vector<int32_t> axis;
3670 if (!axisOperand)
3671 {
3672 axis.assign(dimensionSequence,
3673 dimensionSequence + rank);
3674 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003675 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003676 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003677 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003678 }
3679
3680 std::vector<uint32_t> outputDims;
3681 for (unsigned int i = 0; i < rank; i++)
3682 {
3683 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3684 auto currentDimension = inputInfo.GetShape()[i];
3685 if (skipSqueeze || currentDimension != 1)
3686 {
3687 outputDims.push_back(currentDimension);
3688 }
3689 }
3690
3691 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3692
3693 armnn::TensorInfo outputInfo = inputInfo;
3694 outputInfo.SetShape(outShape);
3695
3696 armnn::ReshapeDescriptor reshapeDesc;
3697 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3698
3699 bool isSupported = false;
3700 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3701 IsReshapeSupported,
3702 data.m_Backends,
3703 isSupported,
3704 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003705 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003706 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003707
Mike Kelly46272802019-08-14 17:00:48 +01003708 if (!isSupported)
3709 {
3710 return false;
3711 }
3712
3713 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3714 assert(layer != nullptr);
3715 input.Connect(layer->GetInputSlot(0));
3716
3717 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3718}
3719
3720template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003721 typename HalOperation = typename HalPolicy::Operation,
3722 typename HalModel = typename HalPolicy::Model>
3723bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003724{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003725 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003726
3727 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3728 if (!input.IsValid())
3729 {
3730 return Fail("%s: Operation has invalid inputs", __func__);
3731 }
3732
3733 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3734 unsigned int rank = inputInfo.GetNumDimensions();
3735 if (rank > 4)
3736 {
3737 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3738 }
3739
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003740 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003741 if (!output)
3742 {
3743 return Fail("%s: Could not read output 0", __func__);
3744 }
3745
3746 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003747
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003748 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3749 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3750 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003751
3752 std::vector<int32_t> beginValues;
3753 std::vector<int32_t> endValues;
3754 std::vector<int32_t> stridesValues;
3755
3756 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003757 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003758 {
3759 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3760 {
3761 return false;
3762 }
3763
3764 if (operandValues.size() != rank)
3765 {
3766 return false;
3767 }
3768
3769 return true;
3770 };
3771
3772 if (!ValidateInputOperands(*beginOperand, beginValues)
3773 || !ValidateInputOperands(*endOperand, endValues)
3774 || !ValidateInputOperands(*stridesOperand, stridesValues))
3775 {
3776 return Fail("%s: Operation has invalid input operand", __func__);
3777 }
3778
3779 // Stride cannot have value '0'
3780 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3781 {
3782 return Fail("%s: Stride must be non-zero value.", __func__);
3783 }
3784
3785 armnn::StridedSliceDescriptor descriptor;
3786 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3787 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3788 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3789 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3790
3791 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3792 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3793 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3794 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3795 {
3796 return Fail("%s: Operation has invalid inputs", __func__);
3797 }
3798
3799 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003800 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3801 {
3802 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3803 IsStridedSliceSupported,
3804 data.m_Backends,
3805 isSupported,
3806 inputInfo,
3807 outputInfo,
3808 descriptor);
3809 };
3810
3811 if(IsDynamicTensor(outputInfo))
3812 {
3813 isSupported = AreDynamicTensorsSupported();
3814 }
3815 else
3816 {
3817 validateFunc(outputInfo, isSupported);
3818 }
3819
Mike Kelly46272802019-08-14 17:00:48 +01003820 if (!isSupported)
3821 {
3822 return false;
3823 }
3824
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003825 // Check if slice can fit in a inferred output
3826 armnn::TensorShape inputShape = inputInfo.GetShape();
3827 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3828 {
3829 int stride = descriptor.m_Stride[i];
3830 int start = descriptor.GetStartForAxis(inputShape, i);
3831 int stop = descriptor.GetStopForAxis(inputShape, i, start);
3832
3833 if (descriptor.m_ShrinkAxisMask & (1 << i))
3834 {
3835 // If the difference between the start point and the end point of the slice on an axis being shrunk
3836 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3837 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3838 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3839 {
3840 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3841 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003842
3843 if(stride < 0)
3844 {
3845 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3846 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003847 }
3848 }
3849
Mike Kelly46272802019-08-14 17:00:48 +01003850 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3851 assert(layer != nullptr);
3852 input.Connect(layer->GetInputSlot(0));
3853
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003854 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003855}
3856
3857template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003858 typename HalOperation = typename HalPolicy::Operation,
3859 typename HalModel = typename HalPolicy::Model>
3860bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003861{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003862 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003863
3864 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3865 if (!input.IsValid())
3866 {
3867 return Fail("%s: Operation has invalid inputs", __func__);
3868 }
3869
3870 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3871 unsigned int rank = inputInfo.GetNumDimensions();
3872 if (rank > 4)
3873 {
3874 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3875 }
3876
3877 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3878 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003879 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003880
3881 std::vector<int32_t> perm(rank);
3882 if (!permOperand)
3883 {
3884 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3885 for (unsigned int i = rank; i > 0; i--)
3886 {
3887 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3888 }
3889 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003890 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003891 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003892 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003893 }
3894
3895 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3896
Mike Kelly4a956582020-02-28 10:32:09 +00003897 armnn::TransposeDescriptor transposeDesc;
3898 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003899
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003900 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003901 if (!output)
3902 {
3903 return Fail("%s: Could not read output 0", __func__);
3904 }
3905
3906 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3907
3908 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003909 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3910 {
3911 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3912 IsTransposeSupported,
3913 data.m_Backends,
3914 isSupported,
3915 inputInfo,
3916 outputInfo,
3917 transposeDesc);
3918 };
3919
3920 if(IsDynamicTensor(outputInfo))
3921 {
3922 isSupported = AreDynamicTensorsSupported();
3923 }
3924 else
3925 {
3926 validateFunc(outputInfo, isSupported);
3927 }
3928
Mike Kelly46272802019-08-14 17:00:48 +01003929 if (!isSupported)
3930 {
3931 return false;
3932 }
3933
Mike Kelly4a956582020-02-28 10:32:09 +00003934 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003935 assert(layer != nullptr);
3936 input.Connect(layer->GetInputSlot(0));
3937
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003938 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003939}
3940
3941template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003942 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003943 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003944 typename HalModel = typename HalPolicy::Model>
3945bool ConvertBatchToSpaceNd(const HalOperation& operation,
3946 const HalModel& model,
3947 ConversionData& data)
3948{
Finn Williams23b87b32019-07-30 11:44:05 +01003949
3950 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3951 if (!input.IsValid())
3952 {
3953 return Fail("%s: Operation has invalid inputs", __func__);
3954 }
3955
3956 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3957 if (!output)
3958 {
3959 return Fail("%s: Could not read output 0", __func__);
3960 }
3961
3962 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01003963
3964 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3965 if (!blockOperand)
3966 {
3967 return Fail("%s: Could not read input 1", __func__);
3968 }
3969
3970 // Convert the block operand to int32
3971 std::vector<int32_t> block;
3972 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3973 {
3974 return Fail("%s: Input 1 has invalid values", __func__);
3975 }
3976
3977 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3978
3979 unsigned int rank = inputInfo.GetNumDimensions();
3980 if (rank != 4)
3981 {
3982 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3983 }
3984
3985 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3986 {
3987 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3988 " greater than or equal to 1", __func__);
3989 }
3990
3991 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3992 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3993 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3994
Kevin May42477c12020-03-26 13:34:14 +00003995 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01003996 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003997 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003998 }
3999 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4000 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4001
4002 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004003 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4004 {
4005 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4006 IsBatchToSpaceNdSupported,
4007 data.m_Backends,
4008 isSupported,
4009 inputInfo,
4010 outputInfo,
4011 batchToSpaceNdDesc);
4012 };
4013
4014 if(!IsDynamicTensor(outputInfo))
4015 {
4016 validateFunc(outputInfo, isSupported);
4017 }
4018 else
4019 {
4020 isSupported = AreDynamicTensorsSupported();
4021 }
4022
4023
Finn Williams23b87b32019-07-30 11:44:05 +01004024 if (!isSupported)
4025 {
4026 return false;
4027 }
4028
4029 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
4030 assert(layer != nullptr);
4031 input.Connect(layer->GetInputSlot(0));
4032
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004033 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004034}
Mike Kelly0a879362019-07-29 16:56:31 +01004035
Finn Williamsd74c5052019-07-30 17:06:00 +01004036template<typename HalPolicy,
4037 typename HalOperation = typename HalPolicy::Operation,
4038 typename HalOperand = typename HalPolicy::Operand,
4039 typename HalModel = typename HalPolicy::Model>
4040bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4041{
4042 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4043 if (!input.IsValid())
4044 {
4045 return Fail("%s: Operation has invalid inputs", __func__);
4046 }
4047
4048 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4049 unsigned int rank = inputInfo.GetNumDimensions();
4050 unsigned int spatialDim = rank - 2;
4051
4052 if (rank != 4)
4053 {
4054 Fail("%s: Only inputs with rank 4 are supported", __func__);
4055 }
4056
4057 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4058 if (!output)
4059 {
4060 return Fail("%s: Could not read output 0", __func__);
4061 }
4062
4063 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004064
4065 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4066 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4067
4068 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4069 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4070 {
4071 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4072 }
4073
4074 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004075 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4076 {
4077 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4078 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004079 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4080 {
4081 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4082 }
4083
4084 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4085 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4086 {
4087 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4088 }
4089
4090 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4091 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004092 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4093 {
4094 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4095 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004096 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4097 {
4098 int paddingBeforeInput = paddings[i];
4099 int paddingAfterInput = paddings[i + 1];
4100 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4101 {
4102 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4103 }
4104
4105 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4106 }
4107
4108 armnn::SpaceToBatchNdDescriptor descriptor;
4109 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4110 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4111 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4112
Kevin May42477c12020-03-26 13:34:14 +00004113 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004114 {
4115 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4116 }
4117
4118 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004119 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4120 {
4121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4122 IsSpaceToBatchNdSupported,
4123 data.m_Backends,
4124 isSupported,
4125 inputInfo,
4126 outputInfo,
4127 descriptor);
4128 };
4129
4130 if(IsDynamicTensor(outputInfo))
4131 {
4132 isSupported = AreDynamicTensorsSupported();
4133 }
4134 else
4135 {
4136 validateFunc(outputInfo, isSupported);
4137 }
4138
Finn Williamsd74c5052019-07-30 17:06:00 +01004139 if (!isSupported)
4140 {
4141 return false;
4142 }
4143
4144 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4145 assert(layer != nullptr);
4146 input.Connect(layer->GetInputSlot(0));
4147
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004148 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004149}
4150
saoste01b8471482018-10-10 09:44:51 +01004151} // namespace armnn_driver