blob: 5dc9993d497c73de81f71acd0b8be887e87436e8 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
/// Aggregates the state shared by all operation converters while an Android NN
/// model is being translated into an armnn::INetwork.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends to query, in order, when checking layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // The network under construction; initialised empty (null pointer + null deleter).
    armnn::INetworkPtr m_Network;
    // For each model operand index, the output slot that produces its tensor.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing operand data referenced by the model.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
/// Handle pairing an output slot with its tensor info, used to represent the
/// source of an operation's input. Carries a validity flag so conversion errors
/// can be propagated without exceptions. (Member functions defined elsewhere.)
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
72
/// Wraps an armnn::ConstTensor built from a constant model operand. When the data
/// had to be permuted, the pin owns the swizzled copy (see m_SwizzledTensorData);
/// otherwise the tensor references memory from the model's pools.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the tensor data (see m_SwizzledTensorData).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: the pin may own the swizzled data buffer.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
107
108} // namespace armnn_driver
109
110///
111/// Utility functions
112///
113
114namespace
115{
116
117using namespace armnn_driver;
118using namespace android::nn;
119
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string, passed straight through to ALOGD.
// @param args      Arguments forwarded to the log call; must match formatStr's specifiers.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
128
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order and sets 'supported' (an lvalue provided by the caller)
// to the result of the first backend that accepts the layer. Rejections and unregistered
// backends are logged per backend; a final message is logged when no backend accepts.
// InvalidArgumentException from the support check is rethrown with added context.
// NOTE: no comments can appear inside the macro body - '//' would swallow the trailing '\'.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100172
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000173template<typename HalOperand>
174armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100175{
176 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
177}
178
Matthew Bentham912b3622019-05-03 15:49:14 +0100179inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100180{
Matthew Bentham912b3622019-05-03 15:49:14 +0100181 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
182 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
183 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100184}
185
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
203
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    switch (type)
    {
        case V1_3::OperandType::BOOL:
        case V1_3::OperandType::TENSOR_BOOL8:
        case V1_3::OperandType::TENSOR_FLOAT16:
        case V1_3::OperandType::TENSOR_FLOAT32:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
        case V1_3::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
222
// V1_0 defines no BOOL operand type, so a V1_0 operand can never hold a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// A V1_0 operand is never a 1.2-or-later operand.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
232
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True when the V1_2 operand holds a scalar BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2-or-later Operand (V1_2 qualifies).
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif
247
#ifdef ARMNN_ANDROID_NN_V1_3

// True when the V1_3 operand holds a scalar BOOL.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2-or-later Operand (V1_3 qualifies).
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
262
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100263template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000264armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
265 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100266 armnn::TensorInfo reshapeInfo)
267{
268 armnn::ReshapeDescriptor reshapeDescriptor;
269 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
270
271 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100272 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273
274 // Attach the input layer to the reshape layer
275 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
276 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
277
278 return *reshapeLayer;
279}
280
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000281bool BroadcastTensor(LayerInputHandle& input0,
282 LayerInputHandle& input1,
283 armnn::IConnectableLayer* startLayer,
284 ConversionData& data)
arovir01b0717b52018-09-05 17:03:25 +0100285{
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100286 ARMNN_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100287
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100288 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
289 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
290
291 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
292 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
293
294 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100295 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100296 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
297 input0.Connect(startLayer->GetInputSlot(0));
298 input1.Connect(startLayer->GetInputSlot(1));
299
Sadik Armagan64b19b52019-08-19 09:49:58 +0100300 return true;
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100301 }
302
303 // Since the number of dimensions do not match then we need to add degenerate dimensions
304 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
305
306 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
307 unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
308 boost::numeric_cast<int>(inputDimensions1));
309
310 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
311 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
312 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
313
314 const armnn::TensorShape& smallShape = smallInfo.GetShape();
315 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
316 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
317 {
318 reshapedDimensions[i] = smallShape[i - sizeDifference];
319 }
320
321 armnn::TensorInfo reshapedInfo = smallInfo;
322 reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
323 reshapedDimensions.data() });
Sadik Armagan64b19b52019-08-19 09:49:58 +0100324
325 // RehsapeDescriptor that is ignored in the IsReshapeSupported function
326 armnn::ReshapeDescriptor reshapeDescriptor;
327
328 bool isSupported = false;
329 FORWARD_LAYER_SUPPORT_FUNC(__func__,
330 IsReshapeSupported,
331 data.m_Backends,
332 isSupported,
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +0000333 smallInfo,
Sadik Armagan64b19b52019-08-19 09:49:58 +0100334 reshapedInfo,
335 reshapeDescriptor);
336 if (!isSupported)
337 {
338 return false;
339 }
340
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100341 ARMNN_ASSERT(data.m_Network != nullptr);
Sadik Armagan64b19b52019-08-19 09:49:58 +0100342 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100343
344 if (input0IsSmaller)
345 {
346 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
347 //
348 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100349 // | |
350 // Reshape |
351 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100352 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100353
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100354 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
355 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100356 }
357 else
358 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100359 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
360 //
361 // Input0 Input1
362 // | |
363 // | Reshape
364 // \ /
365 // StartLayer
366
arovir01b0717b52018-09-05 17:03:25 +0100367 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100368 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100369 }
Sadik Armagan64b19b52019-08-19 09:49:58 +0100370
371 return true;
arovir01b0717b52018-09-05 17:03:25 +0100372}
373
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000374void CalcPadding(uint32_t input,
375 uint32_t kernel,
376 uint32_t stride,
377 uint32_t& outPadHead,
378 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100379 android::nn::PaddingScheme scheme)
380{
381 int32_t padHead;
382 int32_t padTail;
383 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
384 outPadHead = boost::numeric_cast<uint32_t>(padHead);
385 outPadTail = boost::numeric_cast<uint32_t>(padTail);
386}
387
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Dilation-aware overload: explicit padding for a dilated kernel.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head;
    int32_t tail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

// Transpose-convolution variant: padding is derived from the output size and the
// out-parameters stay signed (no unsigned cast is applied).
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
407
Matthew Bentham912b3622019-05-03 15:49:14 +0100408Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100409{
410 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100411 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100412 shape.dimensions = operand.dimensions;
413 shape.scale = operand.scale;
414 shape.offset = operand.zeroPoint;
415 return shape;
416}
417
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Builds an android::nn::Shape from a V1_2 operand.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
431
#ifdef ARMNN_ANDROID_NN_V1_3

// Builds an android::nn::Shape from a V1_3 operand.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
445
arovir01b0717b52018-09-05 17:03:25 +0100446// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
447// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
Aron Virginas-Tara0baa172019-08-01 11:24:08 +0100448// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
449// user (us, in this case) to ensure they match.
arovir01b0717b52018-09-05 17:03:25 +0100450void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000451 const armnn::TensorInfo& weightInfo,
452 const armnn::TensorInfo& inputInfo)
arovir01b0717b52018-09-05 17:03:25 +0100453{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000454 if (weightInfo.HasPerAxisQuantization())
arovir01b0717b52018-09-05 17:03:25 +0100455 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000456 // NOTE: Bias scale is always set to 0 for per-axis quantization and
457 // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
458 auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
arovir01b0717b52018-09-05 17:03:25 +0100459 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000460 return biasScale * inputInfo.GetQuantizationScale();
461 };
462
463 std::vector<float> biasScales(weightInfo.GetQuantizationScales());
464 std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
465
466 biasInfo.SetQuantizationScales(biasScales);
467 biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());
468
469 ALOGV("Bias quantization params have been updated for per-axis quantization");
470 }
471 else
472 {
473 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
474 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
475 {
476 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
477 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
478 {
479 ALOGW("Bias quantization scale has been modified to match input * weights");
480 biasInfo.SetQuantizationScale(expectedBiasScale);
481 }
arovir01b0717b52018-09-05 17:03:25 +0100482 }
483 }
484}
485
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
// The rotations shift every dimension one position to the left / right respectively.
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100493
494template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000495armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
496 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100497{
498 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000499 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100500
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100501 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100502
503 // Connect input to swizzle layer
504 input.Connect(layer->GetInputSlot(0));
505
506 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000507 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100508 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
509
510 return *layer;
511}
512
arovir01b0717b52018-09-05 17:03:25 +0100513bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
514 const armnn::TensorShape & outputShape,
515 uint32_t concatDim)
516{
517 // Validate the output shape is correct given the input shapes (which have just been validated)
518 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
519 if (outputShape.GetNumDimensions() != numDimensions)
520 {
521 return Fail("%s: Output shape has wrong number of dimensions", __func__);
522 }
523
524 unsigned int outputSizeAlongConcatenatedDimension = 0;
525 for (unsigned int i = 0; i < inputShapes.size(); i++)
526 {
527 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
528 }
529
530 for (unsigned int i = 0; i < numDimensions; ++i)
531 {
532 if (i == concatDim)
533 {
534 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
535 {
536 return Fail(
537 "%s: Invalid output shape for dimension %d (%d != %d)",
538 __func__,
539 i,
540 outputShape[i],
541 outputSizeAlongConcatenatedDimension);
542 }
543 }
544 else
545 {
546 if (outputShape[i] != inputShapes[0][i])
547 {
548 return Fail("%s: Invalid output shape", __func__);
549 }
550 }
551 }
552
553 return true;
554}
555
// A tensor of fewer than 3 dimensions must be reshaped before concatenation
// (the concat path below operates on 3-D/4-D shapes only).
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
560
arovir01b0717b52018-09-05 17:03:25 +0100561void SwizzleInputs(armnn::INetwork& network,
562 std::vector<LayerInputHandle>& inputs,
563 std::vector<armnn::TensorShape>& inputShapes,
564 const armnn::PermutationVector& mapping)
565{
566 if (!mapping.IsEqual(IdentityPermutation4D))
567 {
568 size_t nInputs = inputs.size();
569 for (size_t i=0; i<nInputs; ++i)
570 {
571 // add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000572 armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
arovir01b0717b52018-09-05 17:03:25 +0100573 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
574 auto& outputInfo = outputSlot.GetTensorInfo();
575 // replace inputs with the swizzled ones
576 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
577 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
578 }
579 }
580}
581
// Verifies (per input) that a Transpose with 'mapping' is supported by at least one
// backend, then applies SwizzleInputs to insert the transpose layers, updating
// 'inputs' and 'inputShapes' in place. No-op returning true for the 4D identity.
// @return false as soon as any input's transpose is rejected by every backend.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        // All inputs passed the support check; now actually insert the layers.
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
616
617
narpra01f176d5a2018-11-18 20:17:48 +0000618bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
619 int32_t & concatDimension,
620 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100621{
narpra01f176d5a2018-11-18 20:17:48 +0000622 bool needPermute = false;
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100623 ARMNN_ASSERT(numberOfDimensions >= 3);
arovir01b0717b52018-09-05 17:03:25 +0100624
625 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000626 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
627 // or along dimension 0 or 2 for a 3-D tensor.
628 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100629 {
narpra01f176d5a2018-11-18 20:17:48 +0000630 concatDimension = 1;
631 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
632 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100633 }
narpra01f176d5a2018-11-18 20:17:48 +0000634 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100635 {
narpra01f176d5a2018-11-18 20:17:48 +0000636 concatDimension = 0;
637 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
638 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100639 }
narpra01f176d5a2018-11-18 20:17:48 +0000640 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100641}
642
643} // anonymous namespace
644
namespace armnn_driver
{

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @param tensorInfo Tensor info applied to the activation layer's output.
//// @param activation The AndroidNN activation function (may be "none").
//// @param prevLayer  The layer whose output feeds the activation.
//// @param data       Shared conversion state (backends, network under construction).
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver
660
661///
662/// Utility templates
663///
664
665namespace armnn_driver
666{
667
668using namespace android::nn;
669
// Looks up the operand feeding input 'inputIndex' of 'operation' in the model's
// main subgraph. Returns nullptr when the index is out of range; logging of that
// case can be suppressed via 'failOnIndexOutOfBounds' (used by optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // Cast the size_t explicitly: passing it through printf-style varargs
            // matched to %i is undefined behaviour on LP64 platforms.
            Fail("%s: invalid input index: %i out of %i",
                 __func__, inputIndex, static_cast<int>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
692
// Looks up the operand receiving output 'outputIndex' of 'operation' in the model's
// main subgraph. Returns nullptr (after logging) when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // Cast the size_t explicitly: passing it through printf-style varargs
        // matched to %i is undefined behaviour on LP64 platforms.
        Fail("%s: invalid output index: %i out of %i",
             __func__, outputIndex, static_cast<int>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
712
// Returns a read-only pointer to a constant operand's raw data, or nullptr when the
// value is unavailable.
// @param operand  Operand whose value is requested.
// @param model    Model owning the operand (supplies operandValues for CONSTANT_COPY).
// @param data     Conversion state; supplies the memory pools for CONSTANT_REFERENCE.
// @param optional When true, a NO_VALUE lifetime is not an error and quietly yields nullptr.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // Not optional: deliberately treated like any other invalid lifetime below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
759
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100760template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100761 typename HalOperation = typename HalPolicy::Operation,
762 typename HalModel = typename HalPolicy::Model,
763 typename HalOperandType = typename HalPolicy::OperandType>
764bool GetOperandType(const HalOperation& operation,
765 uint32_t inputIndex,
766 const HalModel& model,
767 HalOperandType& type)
768{
769 using HalOperand = typename HalPolicy::Operand;
770
771 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
772 if (!operand)
773 {
774 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
775 }
776
777 type = operand->type;
778 return true;
779}
780
/// Returns true when the operand's lifetime marks it as compile-time constant data
/// (CONSTANT_COPY, CONSTANT_REFERENCE) or as an absent optional (NO_VALUE).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE: // absent optional operands also count as "constant" here
            return true;
        default:
            return false;
    }
}
793
/// Wraps a constant HAL operand in a ConstTensorPin holding its ArmNN tensor info and data.
///
/// @param operand             Constant operand (lifetime must satisfy IsOperandConstant unless optional).
/// @param model               HAL model backing CONSTANT_COPY data.
/// @param data                Conversion state backing CONSTANT_REFERENCE data.
/// @param dimensionMappings   Permutation applied to the tensor (and its per-axis quantization dim).
/// @param overrideTensorShape If non-null, replaces the shape derived from the operand.
/// @param optional            When true, an absent operand yields an invalid-but-optional pin
///                            instead of a failure.
/// @return A valid pin on success; an invalid pin (optionally flagged optional) otherwise.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
842
843template<typename HalPolicy,
844 typename HalOperation = typename HalPolicy::Operation,
845 typename HalModel = typename HalPolicy::Model>
846ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
847 uint32_t inputIndex,
848 const HalModel& model,
849 const ConversionData& data,
850 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
851 const armnn::TensorShape* overrideTensorShape = nullptr,
852 bool optional = false)
853{
854 using HalOperand = typename HalPolicy::Operand;
855
856 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
857 if (!operand)
858 {
859 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
860 return ConstTensorPin();
861 }
862 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
863 model,
864 data,
865 dimensionMappings,
866 overrideTensorShape,
867 optional);
868}
869
870template<typename HalPolicy,
871 typename OutputType,
872 typename HalOperandType = typename HalPolicy::OperandType,
873 typename HalOperation = typename HalPolicy::Operation,
874 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100875bool GetInputScalar(const HalOperation& operation,
876 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100877 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100878 OutputType& outValue,
879 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100880 const ConversionData& data,
881 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100882{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100883 using HalOperand = typename HalPolicy::Operand;
884
885 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100886 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100887 {
888 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
889 }
890
Sadik Armagan813f2302020-05-19 14:10:30 +0100891 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100892 {
893 return Fail("%s: unexpected operand type: %s (should be %s)",
894 __func__, toString(operand->type).c_str(), toString(type).c_str());
895 }
896
Sadik Armagan813f2302020-05-19 14:10:30 +0100897 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100898 {
899 return Fail("%s: incorrect operand location length: %i (should be %i)",
900 __func__, operand->location.length, sizeof(OutputType));
901 }
902
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100903 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100904 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100905 {
906 return Fail("%s: failed to get address for operand", __func__);
907 }
908
Sadik Armagan813f2302020-05-19 14:10:30 +0100909 if(!optional)
910 {
911 outValue = *(static_cast<const OutputType*>(valueAddress));
912 }
913
arovir01b0717b52018-09-05 17:03:25 +0100914 return true;
915}
916
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100917template<typename HalPolicy,
918 typename HalOperation = typename HalPolicy::Operation,
919 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100920bool GetInputInt32(const HalOperation& operation,
921 uint32_t inputIndex,
922 int32_t& outValue,
923 const HalModel& model,
924 const ConversionData& data)
925{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100926 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100927}
928
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100929template<typename HalPolicy,
930 typename HalOperation = typename HalPolicy::Operation,
931 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100932bool GetInputFloat32(const HalOperation& operation,
933 uint32_t inputIndex,
934 float& outValue,
935 const HalModel& model,
936 const ConversionData& data)
937{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100938 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100939}
940
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100941template<typename HalPolicy,
942 typename HalOperation = typename HalPolicy::Operation,
943 typename HalOperandType = typename HalPolicy::OperandType,
944 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100945bool GetInputActivationFunctionImpl(const HalOperation& operation,
946 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100947 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100948 ActivationFn& outActivationFunction,
949 const HalModel& model,
950 const ConversionData& data)
951{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100952 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100953 {
954 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
955 __func__,
956 toString(type).c_str(),
957 toString(OperandType::INT32).c_str(),
958 toString(OperandType::TENSOR_INT32).c_str());
959 }
960
961 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100962 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100963 {
964 return Fail("%s: failed to get activation input value", __func__);
965 }
966 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
967 return true;
968}
969
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100970template<typename HalPolicy,
971 typename HalOperation = typename HalPolicy::Operation,
972 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100973bool GetInputActivationFunction(const HalOperation& operation,
974 uint32_t inputIndex,
975 ActivationFn& outActivationFunction,
976 const HalModel& model,
977 const ConversionData& data)
978{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100979 return GetInputActivationFunctionImpl<HalPolicy>(operation,
980 inputIndex,
981 HalPolicy::OperandType::INT32,
982 outActivationFunction,
983 model,
984 data);
arovir01b0717b52018-09-05 17:03:25 +0100985}
986
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100987template<typename HalPolicy,
988 typename HalOperation = typename HalPolicy::Operation,
989 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100990bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
991 uint32_t inputIndex,
992 ActivationFn& outActivationFunction,
993 const HalModel& model,
994 const ConversionData& data)
995{
996 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100997 return GetInputActivationFunctionImpl<HalPolicy>(operation,
998 inputIndex,
999 HalPolicy::OperandType::INT32,
1000 outActivationFunction,
1001 model,
1002 data);
arovir01b0717b52018-09-05 17:03:25 +01001003}
1004
1005
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001006template<typename HalPolicy,
1007 typename HalOperation = typename HalPolicy::Operation,
1008 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001009bool GetOptionalInputActivation(const HalOperation& operation,
1010 uint32_t inputIndex,
1011 ActivationFn& activationFunction,
1012 const HalModel& model,
1013 const ConversionData& data)
1014{
1015 if (operation.inputs.size() <= inputIndex)
1016 {
1017 activationFunction = ActivationFn::kActivationNone;
1018 }
1019 else
1020 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001021 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001022 {
1023 return Fail("%s: Operation has invalid inputs", __func__);
1024 }
1025 }
1026 return true;
1027}
1028
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001029template<typename HalPolicy,
1030 typename ConvolutionDescriptor,
1031 typename HalOperation = typename HalPolicy::Operation,
1032 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001033bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1034 uint32_t dilationXIndex,
1035 ConvolutionDescriptor& descriptor,
1036 const HalModel& model,
1037 const ConversionData& data)
1038{
1039 bool success = true;
1040 if (operation.inputs.size() >= dilationXIndex + 2)
1041 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001042 success &= GetInputScalar<HalPolicy>(operation,
1043 dilationXIndex,
1044 HalPolicy::OperandType::INT32,
1045 descriptor.m_DilationX,
1046 model,
1047 data);
1048 success &= GetInputScalar<HalPolicy>(operation,
1049 dilationXIndex + 1,
1050 HalPolicy::OperandType::INT32,
1051 descriptor.m_DilationY,
1052 model,
1053 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001054 }
1055
1056 return success;
1057}
1058
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001059template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001060 typename HalOperation = typename HalPolicy::Operation,
1061 typename HalModel = typename HalPolicy::Model>
1062bool GetOptionalBool(const HalOperation& operation,
1063 uint32_t inputIndex,
1064 const HalModel& model,
1065 const ConversionData& data)
1066{
1067 using HalOperand = typename HalPolicy::Operand;
1068
1069 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1070 if (!operand)
1071 {
1072 return false;
1073 }
1074
1075 if (!IsBool(*operand))
1076 {
1077 return false;
1078 }
1079
1080 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1081 if (!valueAddress)
1082 {
1083 return false;
1084 }
1085
1086 if (*(static_cast<const bool*>(valueAddress)))
1087 {
1088 return true;
1089 }
1090 else
1091 {
1092 return false;
1093 }
1094}
1095
1096template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001097 typename HalOperand = typename HalPolicy::Operand,
1098 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001099bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001100 std::vector<int32_t>& outValues,
1101 const HalModel& model,
1102 const ConversionData& data)
1103{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001104 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001105 {
1106 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1107 }
1108
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001109 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001110 if (!startAddress)
1111 {
1112 return Fail("%s: failed to get operand address", __func__, operand.type);
1113 }
1114
1115 // Check number of bytes is sensible
1116 const uint32_t numBytes = operand.location.length;
1117 if (numBytes % sizeof(int32_t) != 0)
1118 {
1119 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1120 __func__, numBytes, sizeof(int32_t));
1121 }
1122
1123 outValues.resize(numBytes / sizeof(int32_t));
1124 memcpy(outValues.data(), startAddress, numBytes);
1125 return true;
1126}
1127
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001128template<typename HalPolicy,
1129 typename HalOperation = typename HalPolicy::Operation,
1130 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001131bool GetInputPaddingScheme(const HalOperation& operation,
1132 uint32_t inputIndex,
1133 PaddingScheme& outPaddingScheme,
1134 const HalModel& model,
1135 const ConversionData& data)
1136{
1137 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001138 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001139 {
1140 return Fail("%s: failed to get padding scheme input value", __func__);
1141 }
1142
1143 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1144 return true;
1145}
1146
/// Resolves operation input @p inputIndex to a LayerInputHandle, creating an ArmNN
/// Constant layer for constant operands and wiring model-internal tensors to the
/// output slot recorded for them in data.m_OutputSlotForOperand.
///
/// @return A valid handle on success; an invalid (default) handle with a logged
///         failure otherwise. Dynamic input tensors are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1259
Kevin May42477c12020-03-26 13:34:14 +00001260
#ifdef ARMNN_ANDROID_NN_V1_3
/// V1_3 overload of ConvertToLayerInputHandle. Identical logic to the generic
/// template, but keyed on the V1_3 lifetimes SUBGRAPH_INPUT / SUBGRAPH_OUTPUT
/// (which replace MODEL_INPUT / MODEL_OUTPUT in the 1.3 HAL).
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1373
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001374template<typename HalPolicy,
1375 typename HalOperation = typename HalPolicy::Operation,
1376 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001377bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1378 uint32_t operationOutputIndex,
1379 armnn::IConnectableLayer& layer,
1380 uint32_t layerOutputIndex,
1381 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +01001382 ConversionData& data,
1383 const armnn::TensorInfo* overrideOutputInfo = nullptr)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001384{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001385 using HalOperand = typename HalPolicy::Operand;
1386
1387 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001388 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1389 {
1390 return false;
1391 }
1392
1393 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1394
1395 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1396 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1397
Sadik Armagan813f2302020-05-19 14:10:30 +01001398 if (overrideOutputInfo == nullptr)
1399 {
1400 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1401 }
1402 else
1403 {
1404 outputSlot.SetTensorInfo(*overrideOutputInfo);
1405 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01001406
1407 return true;
1408}
1409
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001410template<typename HalPolicy,
1411 typename HalOperation = typename HalPolicy::Operation,
1412 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001413armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1414 uint32_t inputIndex,
1415 const HalModel& model,
1416 ConversionData& data)
1417{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001418 using HalOperand = typename HalPolicy::Operand;
1419
1420 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001421 if (!operand)
1422 {
1423 return armnn::DataLayout::NHWC;
1424 }
1425
1426 if (!IsBool(*operand))
1427 {
1428 return armnn::DataLayout::NHWC;
1429 }
1430
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001431 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001432 if (!valueAddress)
1433 {
1434 return armnn::DataLayout::NHWC;
1435 }
1436
1437 if (*(static_cast<const bool*>(valueAddress)))
1438 {
1439 return armnn::DataLayout::NCHW;
1440 }
1441 else
1442 {
1443 return armnn::DataLayout::NHWC;
1444 }
1445}
1446
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001447template<typename HalPolicy,
1448 typename HalOperation = typename HalPolicy::Operation,
1449 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001450bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1451 uint32_t outputIndex,
1452 armnn::IConnectableLayer& layer,
1453 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001454 ConversionData& data,
1455 const armnn::TensorInfo* overrideOutputInfo = nullptr)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001456{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001457 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1458 outputIndex,
1459 layer,
1460 outputIndex,
1461 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001462 data,
1463 overrideOutputInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001464}
1465
/// Converts a HAL activation operation (input 0 -> output 0) into an ArmNN
/// ActivationLayer described by @p activationDesc.
///
/// @param operationName Name used in failure messages for the calling conversion.
/// @return true when the layer was added and its output slot tracked; false otherwise.
///         Dynamic output tensors and backend-unsupported configurations are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the backends whether this activation configuration is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1514
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001515template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001516 typename HalOperation = typename HalPolicy::Operation,
1517 typename HalModel = typename HalPolicy::Model>
1518bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1519{
1520 armnn::ActivationDescriptor desc;
1521 desc.m_Function = armnn::ActivationFunction::ReLu;
1522
1523 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1524}
1525
1526template<typename HalPolicy,
1527 typename HalOperation = typename HalPolicy::Operation,
1528 typename HalModel = typename HalPolicy::Model>
1529bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1530{
1531 armnn::ActivationDescriptor desc;
1532 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1533 desc.m_A = 1.0f;
1534 desc.m_B = -1.0f;
1535
1536 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1537}
1538
1539template<typename HalPolicy,
1540 typename HalOperation = typename HalPolicy::Operation,
1541 typename HalModel = typename HalPolicy::Model>
1542bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1543{
1544 armnn::ActivationDescriptor desc;
1545 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1546 desc.m_A = 6.0f;
1547
1548 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1549}
1550
1551template<typename HalPolicy,
1552 typename HalOperation = typename HalPolicy::Operation,
1553 typename HalModel = typename HalPolicy::Model>
1554bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1555{
1556 armnn::ActivationDescriptor desc;
1557 desc.m_Function = armnn::ActivationFunction::TanH;
1558 desc.m_A = 1.0f; // android nn does not support tanH parameters
1559 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1560
1561 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1562}
1563
1564template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001565 typename HalOperation = typename HalPolicy::Operation,
1566 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001567bool ConvertPaddings(const HalOperation& operation,
1568 const HalModel& model,
1569 ConversionData& data,
1570 unsigned int rank,
1571 armnn::PadDescriptor& padDescriptor)
1572{
1573 using HalOperand = typename HalPolicy::Operand;
1574
1575 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1576 if (!paddingsOperand)
1577 {
1578 return Fail("%s: Could not read paddings operand", __func__);
1579 }
1580
1581 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1582 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1583 {
1584 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1585 }
1586
1587 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001588 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1589 {
1590 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1591 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001592
1593 // add padding for each dimension of input tensor.
1594 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1595 {
1596 int paddingBeforeInput = paddings[i];
1597 int paddingAfterInput = paddings[i + 1];
1598
1599 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1600 {
1601 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1602 }
1603
1604 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1605 }
1606
1607 return true;
1608}
1609
/// Converts a HAL 2-D pooling operation (max/average/L2 selected by @p poolType)
/// into an ArmNN Pooling2d layer, handling both HAL operand layouts:
///  - explicit padding: 1 input tensor + 9 scalars (pad l/r/t/b, strides, pool size, activation)
///  - implicit padding: 1 input tensor + 6 scalars (padding scheme, strides, pool size, activation)
/// A fused activation is appended via ProcessActivation. Returns false on any
/// unsupported or malformed operand.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes cannot be validated here.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Default layout; may be overridden below by an optional trailing layout operand.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // Explicit-padding form:
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2-or-later operands may carry an optional data layout at index 10.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // Implicit-padding form:
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Optional layout operand sits at index 7 in this form.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Resolve the H/W axes from the (possibly overridden) layout before
        // deriving explicit padding from the implicit scheme.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Ask the configured backends whether this pooling configuration is runnable.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (identity if none was requested).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1728
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001729template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001730 typename HalOperation = typename HalPolicy::Operation,
1731 typename HalModel = typename HalPolicy::Model>
1732bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001733{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001734 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001735
1736 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1737 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1738
1739 if (!input0.IsValid() || !input1.IsValid())
1740 {
1741 return Fail("%s: Operation has invalid inputs", __func__);
1742 }
1743
1744 // The FuseActivation parameter is always the input index 2
1745 // and it should be optional
1746 ActivationFn activationFunction;
1747 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1748 {
1749 return Fail("%s: Operation has invalid inputs", __func__);
1750 }
1751
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001752 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001753 if (!outputOperand)
1754 {
1755 return false;
1756 }
1757
1758 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1759 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1760
1761 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1762 if (IsDynamicTensor(outputInfo))
1763 {
1764 return Fail("%s: Dynamic output tensors are not supported", __func__);
1765 }
1766
1767 bool isSupported = false;
1768 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1769 IsAdditionSupported,
1770 data.m_Backends,
1771 isSupported,
1772 inputInfo0,
1773 inputInfo1,
1774 outputInfo);
1775 if (!isSupported)
1776 {
1777 return false;
1778 }
1779
1780 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1781 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1782
1783 if (endLayer != nullptr)
1784 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001785 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001786 if (!isReshapeSupported)
1787 {
1788 return false;
1789 }
1790
Mike Kelly46272802019-08-14 17:00:48 +01001791 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1792 }
1793 else
1794 {
1795 return Fail("%s: ProcessActivation failed", __func__);
1796 }
1797}
1798
1799template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001800 typename HalOperation = typename HalPolicy::Operation,
1801 typename HalModel = typename HalPolicy::Model>
1802bool ConvertArgMinMax(const HalOperation& operation,
1803 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001804 ConversionData& data,
1805 armnn::ArgMinMaxFunction argMinMaxFunction)
1806{
1807 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1808
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001809 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001810 using HalOperandType = typename HalPolicy::OperandType;
1811
1812 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1813
1814 if (!input0.IsValid())
1815 {
1816 return Fail("%s: Operation has invalid inputs", __func__);
1817 }
1818
1819 int32_t axis;
1820 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1821 {
1822 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1823 }
1824
1825 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1826 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1827
1828 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1829 {
1830 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1831 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1832 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1833 return Fail("%s: Axis must be in range [-n, n)", __func__);
1834 }
1835
1836 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1837 if (!output)
1838 {
1839 return Fail("%s: Could not read output 0", __func__);
1840 }
1841
1842 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1843
1844 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1845 if (IsDynamicTensor(outputInfo))
1846 {
1847 return Fail("%s: Dynamic output tensors are not supported", __func__);
1848 }
1849
1850 armnn::ArgMinMaxDescriptor descriptor;
1851 descriptor.m_Function = argMinMaxFunction;
1852 descriptor.m_Axis = axis;
1853
1854 bool isSupported = false;
1855 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1856 IsArgMinMaxSupported,
1857 data.m_Backends,
1858 isSupported,
1859 inputInfo0,
1860 outputInfo,
1861 descriptor);
1862 if (!isSupported)
1863 {
1864 return false;
1865 }
1866
1867 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1868 assert(layer != nullptr);
1869
1870 input0.Connect(layer->GetInputSlot(0));
1871
1872 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1873}
1874
/// Converts a HAL CONCATENATION operation into an ArmNN Concat layer.
///
/// Handles three complications transparently:
///  1. Rank < 3 inputs are expanded (via Reshape layers) to rank 3 and the
///     result is shrunk back afterwards.
///  2. Concatenation along an axis ArmNN's concat descriptor does not support
///     directly is handled by transposing inputs before the concat and
///     transposing (deswizzling) the result back.
///  3. Negative concat axes are normalised TensorFlow-style.
/// Returns false on invalid operands or if any required layer is unsupported
/// by every backend.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input needed rank-expansion so the inverse reshape can
    // be applied to the concat output at the end.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // The expansion reshape itself must be supported by a backend.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather pointers to the (possibly swizzled) input tensor infos for the support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Undo the pre-concat swizzle on the output: validate and add the
        // inverse transpose (permutationPair.second).
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
                                                                     layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // The shrink-back reshape must also be supported by a backend.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2165
/// Converts a HAL CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Supports both HAL operand layouts:
///  - explicit padding: input, weights, bias + 7 scalars (pad l/r/t/b, strides, activation)
///  - implicit padding: input, weights, bias + 4 scalars (padding scheme, strides, activation)
/// Weights and bias must be constant operands; a fused activation is appended
/// via ProcessActivation. Returns false on invalid or unsupported operands.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input-scale * weight-scale where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding form: pad l/r/t/b at indices 3-6, strides at 7-8,
        // fused activation at index 9.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding form: padding scheme at index 3, strides at 4-5,
        // fused activation at index 6; explicit pads are derived below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Indices 1/2 select the H/W axes of both the weights and the NHWC
        // input here — presumably weights are laid out [O, H, W, I]; verify
        // against the HAL weight layout if this is changed.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (identity if none requested).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2285
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002286template<typename HalPolicy,
2287 typename HalOperation = typename HalPolicy::Operation,
2288 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002289bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2290{
2291 using HalOperand = typename HalPolicy::Operand;
2292 using HalOperandType = typename HalPolicy::OperandType;
2293
2294 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2295 if (!input.IsValid() )
2296 {
2297 return Fail("%s: Operation has invalid inputs", __func__);
2298 }
2299
2300 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2301 unsigned int rank = inputInfo.GetNumDimensions();
2302 if (rank != 4)
2303 {
2304 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2305 }
2306
2307 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2308 if (!output)
2309 {
2310 return Fail("%s: Could not read output 0", __func__);
2311 }
2312
2313 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2314 if (IsDynamicTensor(outputInfo))
2315 {
2316 return Fail("%s: Dynamic output tensors are not supported", __func__);
2317 }
2318
2319 armnn::DepthToSpaceDescriptor descriptor;
2320
2321 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2322 if (descriptor.m_BlockSize <= 1)
2323 {
2324 return Fail("%s: Block size must be at least 1 in all dimensions");
2325 }
2326
2327 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002328 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002329 {
2330 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2331 }
2332
2333 bool isSupported = false;
2334 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2335 IsDepthToSpaceSupported,
2336 data.m_Backends,
2337 isSupported,
2338 inputInfo,
2339 outputInfo,
2340 descriptor);
2341 if (!isSupported)
2342 {
2343 return false;
2344 }
2345
2346 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2347 assert(layer != nullptr);
2348 input.Connect(layer->GetInputSlot(0));
2349
2350 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2351}
2352
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. Handles both the explicit-padding form
// (11 inputs) and the implicit-padding form (8 inputs) of the operation.
// Returns true on success; on failure logs via Fail() and returns false.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the input's channel dimension; M is the depth multiplier
    // recovered from the AndroidNN channel count I * M divided by I)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with the weights/input scales
    // (small floating point differences can exist between them).
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // Explicit padding variant: pad left/right/top/bottom, strides and fused activation.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit padding variant: a padding scheme plus strides and fused activation;
    // the actual pad values are computed from the input/kernel sizes below.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights were swizzled to [ M, I, H, W ] above, so W is dim 3 and H is dim 2;
        // the input is NHWC, so width is dim 2 and height is dim 1.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2497
Mike Kelly3c673942019-07-25 09:26:06 +01002498template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002499 typename HalOperation = typename HalPolicy::Operation,
2500 typename HalModel = typename HalPolicy::Model>
2501bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002502{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002503 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002504
2505 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2506 if (!input.IsValid())
2507 {
2508 return Fail("%s: Operation has invalid input", __func__);
2509 }
2510
Sadik Armagan98c0f662019-11-21 15:54:36 +00002511 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2512 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2513 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2514 {
2515 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2516 }
2517
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002518 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002519 if (!outputOperand)
2520 {
2521 return Fail("%s: Operation has invalid outputs", __func__);
2522 }
2523
2524 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2525 if (IsDynamicTensor(outputInfo))
2526 {
2527 return Fail("%s: Dynamic output tensors are not supported", __func__);
2528 }
2529
2530 bool isSupported = false;
2531 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2532 IsDequantizeSupported,
2533 data.m_Backends,
2534 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002535 inputInfo,
2536 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002537 if (!isSupported)
2538 {
2539 return false;
2540 }
2541
2542 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2543 assert(layer != nullptr);
2544 input.Connect(layer->GetInputSlot(0));
2545
2546 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2547}
2548
2549template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002550 typename HalOperation = typename HalPolicy::Operation,
2551 typename HalModel = typename HalPolicy::Model>
2552bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002553{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002554 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002555
2556 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2557 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2558
2559 if (!input0.IsValid() || !input1.IsValid())
2560 {
2561 return Fail("%s: Operation has invalid inputs", __func__);
2562 }
2563
2564 // The FuseActivation parameter is always the input index 2
2565 // and it should be optional
2566 ActivationFn activationFunction;
2567 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2568 {
2569 return Fail("%s: Operation has invalid inputs", __func__);
2570 }
2571
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002572 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002573 if (!output)
2574 {
2575 return Fail("%s: Could not read output 0", __func__);
2576 }
2577
2578 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2579 if (IsDynamicTensor(outputInfo))
2580 {
2581 return Fail("%s: Dynamic output tensors are not supported", __func__);
2582 }
2583
2584 bool isSupported = false;
2585 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2586 IsDivisionSupported,
2587 data.m_Backends,
2588 isSupported,
2589 input0.GetTensorInfo(),
2590 input1.GetTensorInfo(),
2591 outputInfo);
2592 if (!isSupported)
2593 {
2594 return false;
2595 }
2596
2597 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2598 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2599
2600 if (endLayer)
2601 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002602 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002603 if (!isReshapeSupported)
2604 {
2605 return false;
2606 }
2607
Mike Kelly46272802019-08-14 17:00:48 +01002608 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2609 }
2610 return Fail("%s: ProcessActivation failed", __func__);
2611}
2612
2613template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002614 typename HalOperation = typename HalPolicy::Operation,
2615 typename HalModel = typename HalPolicy::Model>
2616bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002617{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002618 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002619
2620 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2621 if (!input.IsValid())
2622 {
2623 return Fail("%s: Operation has invalid inputs", __func__);
2624 }
2625
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002626 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002627 if (!outputOperand)
2628 {
2629 return Fail("%s: Operation has invalid outputs", __func__);
2630 }
2631
2632 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2633 if (IsDynamicTensor(outputInfo))
2634 {
2635 return Fail("%s: Dynamic output tensors are not supported", __func__);
2636 }
2637
2638 bool isSupported = false;
2639 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2640 IsFloorSupported,
2641 data.m_Backends,
2642 isSupported,
2643 input.GetTensorInfo(),
2644 outputInfo);
2645 if (!isSupported)
2646 {
2647 return false;
2648 }
2649
2650 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2651 assert(layer != nullptr);
2652 input.Connect(layer->GetInputSlot(0));
2653
2654 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2655}
2656
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a V1_0 operand is never QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2661
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Returns true if the given HAL 1.2 operand holds TENSOR_QUANT8_SYMM data.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2670
#ifdef ARMNN_ANDROID_NN_V1_3

// Returns true if the given HAL 1.3 operand holds TENSOR_QUANT8_SYMM data.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2679
// Outcome of DequantizeIfRequired(): whether a dequantized copy of the weights
// was produced, was unnecessary, or could not be attempted.
enum class DequantizeStatus
{
    SUCCESS,         // a new float buffer with dequantized weights was produced
    NOT_REQUIRED,    // weights are already constant (or no matching DEQUANTIZE op found)
    INVALID_OPERAND  // the weights operand could not be read from the model
};

// (dequantized buffer, buffer size in bytes, tensor info describing the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2688
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002689template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002690 typename HalOperation = typename HalPolicy::Operation,
2691 typename HalModel = typename HalPolicy::Model>
2692DequantizeResult DequantizeIfRequired(size_t operand_index,
2693 const HalOperation& operation,
2694 const HalModel& model,
2695 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002696{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002697 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002698
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002699 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002700 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002701 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002702 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002703 }
2704
2705 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2706 {
2707 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002708 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002709 }
2710
2711 const size_t weightsInputIndex = operation.inputs[operand_index];
2712
2713 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2714 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002715 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002716 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002717 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002718 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002719 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2720 {
2721 continue;
2722 }
2723
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002724 size_t outOpIndex = weightsInputIndex + 1;
2725 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002726 {
2727 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002728 }
2729
2730 if (outOpIndex != weightsInputIndex)
2731 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002732 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002733 }
2734
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002735 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002736 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002737
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002738 if (!IsQSymm8(*operand))
2739 {
2740 // Only supporting dequantize from QSYMM8 to FLOAT
2741 break;
2742 }
2743
2744 // Allocate a new buffer for the dequantized data and manually dequantize
2745 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2746 if (!startValue)
2747 {
2748 // Failed to get the operand address
2749 break;
2750 }
2751
2752 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2753 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002754 const float quantizationScale = operand->scale;
2755
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002756 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2757 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2758 {
2759 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002760 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002761 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2762 }
2763
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002764 // Construct tensor info for dequantized ConstTensor
2765 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2766 operand->dimensions.data(),
2767 armnn::DataType::Float32);
2768
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002769 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2770 std::move(tensorInfo),
2771 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002772 }
2773
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002774 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002775}
2776
2777template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002778 typename HalOperation = typename HalPolicy::Operation,
2779 typename HalModel = typename HalPolicy::Model>
2780ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2781 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002782 const ConversionData& data,
2783 size_t operandIndex,
2784 bool optional = false)
2785{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002786 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2787
2788 DequantizeStatus status = std::get<3>(dequantized);
2789 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002790 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002791 case DequantizeStatus::INVALID_OPERAND:
2792 {
2793 // return invalid const tensor pin
2794 return ConstTensorPin();
2795 }
2796 case DequantizeStatus::NOT_REQUIRED:
2797 {
2798 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2799 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2800 }
2801 case DequantizeStatus::SUCCESS:
2802 default:
2803 {
2804 return ConstTensorPin(
2805 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2806 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002807 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002808}
2809
2810
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected
// layer, flattening inputs with rank > 2 via a Reshape layer and dequantizing
// non-constant QSYMM8 weights where required.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights (input 1) may be a DEQUANTIZE output; dequantize them up front if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to the 2D form expected by FullyConnected;
    // FlattenFullyConnectedInput throws if the shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Verify that the flattened input / weights / output shapes are mutually consistent.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // For inputs with rank > 2, insert a Reshape layer to flatten them first.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2929
2930template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002931 typename HalOperation = typename HalPolicy::Operation,
2932 typename HalModel = typename HalPolicy::Model>
2933bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002934{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002935 using HalOperand = typename HalPolicy::Operand;
2936
Mike Kelly999e2092019-08-15 10:46:46 +01002937 if (operation.inputs.size() != 1)
2938 {
2939 return Fail("%s: Optional inputs are not supported", __func__);
2940 }
2941
Mike Kelly46272802019-08-14 17:00:48 +01002942 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2943 if (!input.IsValid())
2944 {
2945 return Fail("%s: Operation has invalid inputs", __func__);
2946 }
2947
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002948 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002949 if (!output)
2950 {
2951 return Fail("%s: Could not read output 0", __func__);
2952 }
2953
2954 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2955 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2956
2957 if (IsDynamicTensor(outputInfo))
2958 {
2959 return Fail("%s: Dynamic output tensors are not supported", __func__);
2960 }
2961 if (outputInfo.GetNumDimensions() != 4u)
2962 {
2963 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2964 }
2965
2966 armnn::L2NormalizationDescriptor desc;
2967 desc.m_DataLayout = armnn::DataLayout::NHWC;
2968
2969 bool isSupported = false;
2970 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2971 IsL2NormalizationSupported,
2972 data.m_Backends,
2973 isSupported,
2974 inputInfo,
2975 outputInfo,
2976 desc);
2977 if (!isSupported)
2978 {
2979 return false;
2980 }
2981
2982 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2983 assert(layer != nullptr);
2984 input.Connect(layer->GetInputSlot(0));
2985
2986 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2987}
2988
2989template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002990 typename HalOperation = typename HalPolicy::Operation,
2991 typename HalModel = typename HalPolicy::Model>
2992bool ConvertLocalResponseNormalization(const HalOperation& operation,
2993 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01002994 ConversionData& data)
2995{
Mike Kelly999e2092019-08-15 10:46:46 +01002996 if (operation.inputs.size() != 5)
2997 {
2998 return Fail("%s: Optional inputs are not supported", __func__);
2999 }
3000
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003001 using HalOperand = typename HalPolicy::Operand;
3002 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003003
3004 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3005 if (!input.IsValid())
3006 {
3007 return Fail("%s: Operation has invalid inputs", __func__);
3008 }
3009
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003010 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003011 if (!output)
3012 {
3013 return Fail("%s: Could not read output 0", __func__);
3014 }
3015
3016 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3017 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3018
3019 if (IsDynamicTensor(outputInfo))
3020 {
3021 return Fail("%s: Dynamic output tensors are not supported", __func__);
3022 }
3023 if (outputInfo.GetNumDimensions() != 4u)
3024 {
3025 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3026 }
3027
3028 armnn::NormalizationDescriptor descriptor;
3029 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3030 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3031 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3032
3033 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003034 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003035 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3036 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3037 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3038 {
3039 return Fail("%s: Operation has invalid inputs", __func__);
3040 }
3041
3042 // ArmNN expects normSize to be the full size of the normalization
3043 // window rather than the radius as in AndroidNN.
3044 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3045
3046 bool isSupported = false;
3047 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3048 IsNormalizationSupported,
3049 data.m_Backends,
3050 isSupported,
3051 inputInfo,
3052 outputInfo,
3053 descriptor);
3054 if (!isSupported)
3055 {
3056 return false;
3057 }
3058
3059
3060 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3061 assert(layer != nullptr);
3062 input.Connect(layer->GetInputSlot(0));
3063
3064 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3065}
3066
3067template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003068 typename HalOperation = typename HalPolicy::Operation,
3069 typename HalModel = typename HalPolicy::Model>
3070bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003071{
Mike Kelly46272802019-08-14 17:00:48 +01003072 armnn::ActivationDescriptor desc;
3073 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3074
3075 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3076}
3077
3078template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003079 typename HalOperation = typename HalPolicy::Operation,
3080 typename HalModel = typename HalPolicy::Model>
3081bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003082{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003083 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003084
3085 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3086 if (!input.IsValid())
3087 {
3088 return Fail("%s: Operation has invalid inputs", __func__);
3089 }
3090
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003091 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003092 if (!output)
3093 {
3094 return Fail("%s: Could not read output 0", __func__);
3095 }
3096
3097 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3098 if (IsDynamicTensor(outputInfo))
3099 {
3100 return Fail("%s: Dynamic output tensors are not supported", __func__);
3101 }
3102
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003103 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003104 if (!axisOperand)
3105 {
3106 return Fail("%s: Could not read input 1", __func__);
3107 }
3108
3109 std::vector<int32_t> axis;
3110 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3111 {
3112 return Fail("%s: Input 1 has invalid values", __func__);
3113 }
3114
3115 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3116
3117 // Convert the axis to unsigned int and remove duplicates.
3118 unsigned int rank = inputInfo.GetNumDimensions();
3119 std::set<unsigned int> uniqueAxis;
3120 std::transform(axis.begin(), axis.end(),
3121 std::inserter(uniqueAxis, uniqueAxis.begin()),
3122 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3123
3124 // Get the "keep dims" flag.
3125 int32_t keepDims = 0;
3126 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3127 {
3128 return Fail("%s: Could not read input 2", __func__);
3129 }
3130
3131 armnn::MeanDescriptor descriptor;
3132 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3133 descriptor.m_KeepDims = keepDims > 0;
3134
3135 bool isSupported = false;
3136 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3137 IsMeanSupported,
3138 data.m_Backends,
3139 isSupported,
3140 inputInfo,
3141 outputInfo,
3142 descriptor);
3143 if (!isSupported)
3144 {
3145 return false;
3146 }
3147
3148 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3149 assert(layer != nullptr);
3150 input.Connect(layer->GetInputSlot(0));
3151
3152 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3153}
3154
3155template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003156 typename HalOperation = typename HalPolicy::Operation,
3157 typename HalModel = typename HalPolicy::Model>
3158bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003159{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003160 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003161
3162 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3163 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3164
3165 if (!input0.IsValid() || !input1.IsValid())
3166 {
3167 return Fail("%s: Operation has invalid inputs", __func__);
3168 }
3169
3170 // The FuseActivation parameter is always the input index 2
3171 // and it should be optional
3172 ActivationFn activationFunction;
3173 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3174 {
3175 return Fail("%s: Operation has invalid inputs", __func__);
3176 }
3177
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003178 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003179
3180 if (outputOperand == nullptr)
3181 {
3182 return false;
3183 }
3184
3185 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3186 if (IsDynamicTensor(outputInfo))
3187 {
3188 return Fail("%s: Dynamic output tensors are not supported", __func__);
3189 }
3190
3191 bool isSupported = false;
3192 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3193 IsMultiplicationSupported,
3194 data.m_Backends,
3195 isSupported,
3196 input0.GetTensorInfo(),
3197 input1.GetTensorInfo(),
3198 outputInfo);
3199 if (!isSupported)
3200 {
3201 return false;
3202 }
3203
3204 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3205 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3206
3207 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3208 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3209
3210 if (endLayer != nullptr)
3211 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003212 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003213 if (!isReshapeSupported)
3214 {
3215 return false;
3216 }
3217
Mike Kelly46272802019-08-14 17:00:48 +01003218 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3219 }
3220 else
3221 {
3222 return Fail("%s: ProcessActivation failed", __func__);
3223 }
3224}
3225
3226template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003227 typename HalOperation = typename HalPolicy::Operation,
3228 typename HalModel = typename HalPolicy::Model>
3229bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003230{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003231 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003232
Mike Kelly3c673942019-07-25 09:26:06 +01003233 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3234 if (!input.IsValid())
3235 {
3236 return Fail("%s: Operation has invalid inputs", __func__);
3237 }
3238
3239 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3240 unsigned int rank = inputInfo.GetNumDimensions();
3241
3242 armnn::PadDescriptor descriptor;
3243 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3244 {
3245 return Fail("%s: Could not convert paddings", __func__);
3246 }
3247
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003248 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3249 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003250 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3251 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3252 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003253 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003254 {
3255 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3256 }
3257
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003258 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003259 if (!output)
3260 {
3261 return Fail("%s: Could not read output", __func__);
3262 }
3263
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003264 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003265 if (IsDynamicTensor(outputInfo))
3266 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003267 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003268 }
3269
3270 bool isSupported = false;
3271 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3272 IsPadSupported,
3273 data.m_Backends,
3274 isSupported,
3275 inputInfo,
3276 outputInfo,
3277 descriptor);
3278 if (!isSupported)
3279 {
3280 return false;
3281 }
3282
3283 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3284 assert(layer != nullptr);
3285 input.Connect(layer->GetInputSlot(0));
3286 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3287
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003288 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003289}
3290
Mike Kelly0a879362019-07-29 16:56:31 +01003291template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003292 typename HalOperation = typename HalPolicy::Operation,
3293 typename HalModel = typename HalPolicy::Model>
3294bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003295{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003296 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003297
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003298 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3299 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3300 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003301
3302 if (inputOperand == nullptr
3303 || requestedShapeOperand == nullptr
3304 || outputOperand == nullptr)
3305 {
3306 return Fail("%s: Operation has invalid inputs", __func__);
3307 }
3308
3309 if (requestedShapeOperand->dimensions.size() != 1)
3310 {
3311 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3312 __func__, requestedShapeOperand->dimensions.size());
3313 }
3314
3315 std::vector<int32_t> targetDimensions;
3316 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3317 {
3318 return Fail("%s: Could not read values of input 1", __func__);
3319 }
3320
3321 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3322
3323 Shape requestedShape;
3324 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3325 // function that resolves these values into a fully specified tensor shape.
3326 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3327 {
3328 return Fail("%s: Failed to resolve the requested shape", __func__);
3329 }
3330
3331 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3332 if (!SameShape(requestedShape, outputOperandShape))
3333 {
3334 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3335 }
3336
3337 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3338 if (!input.IsValid())
3339 {
3340 return Fail("%s: Could not read input 0", __func__);
3341 }
3342
3343 armnn::ReshapeDescriptor reshapeDescriptor;
3344 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3345 requestedShape.dimensions.data());
3346
3347 bool isSupported = false;
3348 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3349 IsReshapeSupported,
3350 data.m_Backends,
3351 isSupported,
3352 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003353 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003354 reshapeDescriptor);
3355 if (!isSupported)
3356 {
3357 return false;
3358 }
3359
3360 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3361 assert(layer != nullptr);
3362 input.Connect(layer->GetInputSlot(0));
3363
3364 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3365}
3366
3367template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003368 typename HalOperation = typename HalPolicy::Operation,
3369 typename HalModel = typename HalPolicy::Model>
3370bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003371{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003372 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003373
Mike Kelly0a879362019-07-29 16:56:31 +01003374 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3375 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3376
3377 if (!input0.IsValid() || !input1.IsValid())
3378 {
3379 return Fail("%s: Operation has invalid inputs", __func__);
3380 }
3381
3382 // The FuseActivation parameter is always the input index 2
3383 // and it should be optional
3384 ActivationFn activationFunction;
3385 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3386 {
3387 return Fail("%s: Operation has invalid inputs", __func__);
3388 }
3389
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003390 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003391 if (!output)
3392 {
3393 return Fail("%s: Could not read output 0", __func__);
3394 }
3395
3396 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3397 if (IsDynamicTensor(outputInfo))
3398 {
3399 return Fail("%s: Dynamic output tensors are not supported", __func__);
3400 }
3401
3402 bool isSupported = false;
3403 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3404 IsSubtractionSupported,
3405 data.m_Backends,
3406 isSupported,
3407 input0.GetTensorInfo(),
3408 input1.GetTensorInfo(),
3409 outputInfo);
3410 if (!isSupported)
3411 {
3412 return false;
3413 }
3414
3415 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3416 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3417
3418 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3419 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3420
3421 if (endLayer)
3422 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003423 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003424 if (!isReshapeSupported)
3425 {
3426 return false;
3427 }
Mike Kelly0a879362019-07-29 16:56:31 +01003428 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3429 }
3430
3431 return Fail("%s: ProcessActivation failed", __func__);
3432}
3433
Finn Williams23b87b32019-07-30 11:44:05 +01003434template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003435 typename HalOperation = typename HalPolicy::Operation,
3436 typename HalModel = typename HalPolicy::Model>
3437bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003438{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003439 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003440
3441 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3442 if (!input.IsValid())
3443 {
3444 return Fail("%s: Operation has invalid inputs", __func__);
3445 }
3446
3447 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3448 unsigned int rank = inputInfo.GetNumDimensions();
3449 if (rank > 4)
3450 {
3451 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3452 }
3453
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003454 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003455 if (!output)
3456 {
3457 return Fail("%s: Could not read output 0", __func__);
3458 }
3459
3460 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3461 {
3462 return Fail("%s: Dynamic output tensors are not supported", __func__);
3463 }
3464
3465 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3466 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003467 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003468
3469 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3470
3471 std::vector<int32_t> axis;
3472 if (!axisOperand)
3473 {
3474 axis.assign(dimensionSequence,
3475 dimensionSequence + rank);
3476 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003477 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003478 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003479 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003480 }
3481
3482 std::vector<uint32_t> outputDims;
3483 for (unsigned int i = 0; i < rank; i++)
3484 {
3485 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3486 auto currentDimension = inputInfo.GetShape()[i];
3487 if (skipSqueeze || currentDimension != 1)
3488 {
3489 outputDims.push_back(currentDimension);
3490 }
3491 }
3492
3493 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3494
3495 armnn::TensorInfo outputInfo = inputInfo;
3496 outputInfo.SetShape(outShape);
3497
3498 armnn::ReshapeDescriptor reshapeDesc;
3499 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3500
3501 bool isSupported = false;
3502 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3503 IsReshapeSupported,
3504 data.m_Backends,
3505 isSupported,
3506 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003507 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003508 reshapeDesc);
3509 if (!isSupported)
3510 {
3511 return false;
3512 }
3513
3514 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3515 assert(layer != nullptr);
3516 input.Connect(layer->GetInputSlot(0));
3517
3518 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3519}
3520
3521template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003522 typename HalOperation = typename HalPolicy::Operation,
3523 typename HalModel = typename HalPolicy::Model>
3524bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003525{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003526 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003527
3528 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3529 if (!input.IsValid())
3530 {
3531 return Fail("%s: Operation has invalid inputs", __func__);
3532 }
3533
3534 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3535 unsigned int rank = inputInfo.GetNumDimensions();
3536 if (rank > 4)
3537 {
3538 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3539 }
3540
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003541 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003542 if (!output)
3543 {
3544 return Fail("%s: Could not read output 0", __func__);
3545 }
3546
3547 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3548 if (IsDynamicTensor(outputInfo))
3549 {
3550 return Fail("%s: Dynamic output tensors are not supported", __func__);
3551 }
3552
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003553 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3554 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3555 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003556
3557 std::vector<int32_t> beginValues;
3558 std::vector<int32_t> endValues;
3559 std::vector<int32_t> stridesValues;
3560
3561 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003562 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003563 {
3564 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3565 {
3566 return false;
3567 }
3568
3569 if (operandValues.size() != rank)
3570 {
3571 return false;
3572 }
3573
3574 return true;
3575 };
3576
3577 if (!ValidateInputOperands(*beginOperand, beginValues)
3578 || !ValidateInputOperands(*endOperand, endValues)
3579 || !ValidateInputOperands(*stridesOperand, stridesValues))
3580 {
3581 return Fail("%s: Operation has invalid input operand", __func__);
3582 }
3583
3584 // Stride cannot have value '0'
3585 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3586 {
3587 return Fail("%s: Stride must be non-zero value.", __func__);
3588 }
3589
3590 armnn::StridedSliceDescriptor descriptor;
3591 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3592 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3593 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3594 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3595
3596 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3597 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3598 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3599 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3600 {
3601 return Fail("%s: Operation has invalid inputs", __func__);
3602 }
3603
3604 bool isSupported = false;
3605 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3606 IsStridedSliceSupported,
3607 data.m_Backends,
3608 isSupported,
3609 inputInfo,
3610 outputInfo,
3611 descriptor);
3612 if (!isSupported)
3613 {
3614 return false;
3615 }
3616
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003617 // Check if slice can fit in a inferred output
3618 armnn::TensorShape inputShape = inputInfo.GetShape();
3619 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3620 {
3621 int stride = descriptor.m_Stride[i];
3622 int start = descriptor.GetStartForAxis(inputShape, i);
3623 int stop = descriptor.GetStopForAxis(inputShape, i, start);
3624
3625 if (descriptor.m_ShrinkAxisMask & (1 << i))
3626 {
3627 // If the difference between the start point and the end point of the slice on an axis being shrunk
3628 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3629 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3630 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3631 {
3632 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3633 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003634
3635 if(stride < 0)
3636 {
3637 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3638 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003639 }
3640 }
3641
Mike Kelly46272802019-08-14 17:00:48 +01003642 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3643 assert(layer != nullptr);
3644 input.Connect(layer->GetInputSlot(0));
3645
3646 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3647}
3648
3649template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003650 typename HalOperation = typename HalPolicy::Operation,
3651 typename HalModel = typename HalPolicy::Model>
3652bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003653{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003654 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003655
3656 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3657 if (!input.IsValid())
3658 {
3659 return Fail("%s: Operation has invalid inputs", __func__);
3660 }
3661
3662 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3663 unsigned int rank = inputInfo.GetNumDimensions();
3664 if (rank > 4)
3665 {
3666 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3667 }
3668
3669 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3670 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003671 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003672
3673 std::vector<int32_t> perm(rank);
3674 if (!permOperand)
3675 {
3676 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3677 for (unsigned int i = rank; i > 0; i--)
3678 {
3679 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3680 }
3681 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003682 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003683 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003684 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003685 }
3686
3687 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3688
Mike Kelly4a956582020-02-28 10:32:09 +00003689 armnn::TransposeDescriptor transposeDesc;
3690 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003691
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003692 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003693 if (!output)
3694 {
3695 return Fail("%s: Could not read output 0", __func__);
3696 }
3697
3698 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003699 if (IsDynamicTensor(outputInfo))
3700 {
3701 return Fail("%s: Dynamic output tensors are not supported", __func__);
3702 }
3703
Mike Kelly46272802019-08-14 17:00:48 +01003704
3705 bool isSupported = false;
3706 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003707 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003708 data.m_Backends,
3709 isSupported,
3710 inputInfo,
3711 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003712 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003713 if (!isSupported)
3714 {
3715 return false;
3716 }
3717
Mike Kelly4a956582020-02-28 10:32:09 +00003718 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003719 assert(layer != nullptr);
3720 input.Connect(layer->GetInputSlot(0));
3721
3722 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3723}
3724
3725template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003726 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003727 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003728 typename HalModel = typename HalPolicy::Model>
3729bool ConvertBatchToSpaceNd(const HalOperation& operation,
3730 const HalModel& model,
3731 ConversionData& data)
3732{
Finn Williams23b87b32019-07-30 11:44:05 +01003733
3734 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3735 if (!input.IsValid())
3736 {
3737 return Fail("%s: Operation has invalid inputs", __func__);
3738 }
3739
3740 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3741 if (!output)
3742 {
3743 return Fail("%s: Could not read output 0", __func__);
3744 }
3745
3746 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3747 if (IsDynamicTensor(outputInfo))
3748 {
3749 return Fail("%s: Dynamic output tensors are not supported", __func__);
3750 }
3751
3752 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3753 if (!blockOperand)
3754 {
3755 return Fail("%s: Could not read input 1", __func__);
3756 }
3757
3758 // Convert the block operand to int32
3759 std::vector<int32_t> block;
3760 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3761 {
3762 return Fail("%s: Input 1 has invalid values", __func__);
3763 }
3764
3765 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3766
3767 unsigned int rank = inputInfo.GetNumDimensions();
3768 if (rank != 4)
3769 {
3770 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3771 }
3772
3773 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3774 {
3775 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3776 " greater than or equal to 1", __func__);
3777 }
3778
3779 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3780 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3781 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3782
Kevin May42477c12020-03-26 13:34:14 +00003783 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01003784 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003785 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003786 }
3787 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3788 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3789
3790 bool isSupported = false;
3791 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3792 IsBatchToSpaceNdSupported,
3793 data.m_Backends,
3794 isSupported,
3795 inputInfo,
3796 outputInfo,
3797 batchToSpaceNdDesc);
3798 if (!isSupported)
3799 {
3800 return false;
3801 }
3802
3803 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3804 assert(layer != nullptr);
3805 input.Connect(layer->GetInputSlot(0));
3806
3807 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3808}
Mike Kelly0a879362019-07-29 16:56:31 +01003809
Finn Williamsd74c5052019-07-30 17:06:00 +01003810template<typename HalPolicy,
3811 typename HalOperation = typename HalPolicy::Operation,
3812 typename HalOperand = typename HalPolicy::Operand,
3813 typename HalModel = typename HalPolicy::Model>
3814bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3815{
3816 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3817 if (!input.IsValid())
3818 {
3819 return Fail("%s: Operation has invalid inputs", __func__);
3820 }
3821
3822 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3823 unsigned int rank = inputInfo.GetNumDimensions();
3824 unsigned int spatialDim = rank - 2;
3825
3826 if (rank != 4)
3827 {
3828 Fail("%s: Only inputs with rank 4 are supported", __func__);
3829 }
3830
3831 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3832 if (!output)
3833 {
3834 return Fail("%s: Could not read output 0", __func__);
3835 }
3836
3837 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3838 if (IsDynamicTensor(outputInfo))
3839 {
3840 return Fail("%s: Dynamic output tensors are not supported", __func__);
3841 }
3842
3843 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3844 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3845
3846 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3847 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3848 {
3849 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3850 }
3851
3852 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003853 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3854 {
3855 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3856 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003857 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3858 {
3859 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3860 }
3861
3862 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3863 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3864 {
3865 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3866 }
3867
3868 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3869 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003870 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3871 {
3872 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3873 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003874 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3875 {
3876 int paddingBeforeInput = paddings[i];
3877 int paddingAfterInput = paddings[i + 1];
3878 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3879 {
3880 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3881 }
3882
3883 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3884 }
3885
3886 armnn::SpaceToBatchNdDescriptor descriptor;
3887 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3888 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3889 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3890
Kevin May42477c12020-03-26 13:34:14 +00003891 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01003892 {
3893 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3894 }
3895
3896 bool isSupported = false;
3897 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3898 IsSpaceToBatchNdSupported,
3899 data.m_Backends,
3900 isSupported,
3901 inputInfo,
3902 outputInfo,
3903 descriptor);
3904 if (!isSupported)
3905 {
3906 return false;
3907 }
3908
3909 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3910 assert(layer != nullptr);
3911 input.Connect(layer->GetInputSlot(0));
3912
3913 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3914}
3915
saoste01b8471482018-10-10 09:44:51 +01003916} // namespace armnn_driver