blob: 5a1113174a6b7ef68cb5f6a43024e155c248e377 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
// Bundles the mutable state threaded through model conversion: the candidate
// backends, the ArmNN network being built, the output slot created for each
// operand, and the memory pools the model's constant data lives in.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)   // INetworkPtr is a unique_ptr with a custom deleter; start empty
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    // Indexed by operand index; slot that produces each converted operand's tensor.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Mapped memory pools backing CONSTANT_REFERENCE operands; must outlive the network build.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
// Lightweight handle pairing an ArmNN output slot with the tensor info it carries;
// used to connect a converted operand into a subsequent layer's input slot.
// (Member definitions live in the corresponding .cpp.)
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when the handle refers to a real output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
72
// Wraps an armnn::ConstTensor whose data is either borrowed from a model memory
// pool or owned locally (when the data had to be swizzled into a new layout).
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying would duplicate/alias the owned swizzled buffer, so only moves are allowed.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
107
108} // namespace armnn_driver
109
110///
111/// Utility functions
112///
113
114namespace
115{
116
117using namespace armnn_driver;
118using namespace android::nn;
119
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Debug-level log only: conversion failures are expected and the caller falls back gracefully.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
128
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Walks the backends in priority order and sets 'supported' (which must be an assignable
// bool lvalue at the call site) to the first backend's positive answer; each rejection is
// logged with the backend's reason string, and unregistered backends are logged and skipped.
// InvalidArgumentException from a support check is rethrown with added location context.
// (No comments inside the body: '//' would swallow the line-continuation backslashes.)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100172
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000173template<typename HalOperand>
174armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100175{
176 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
177}
178
Matthew Bentham912b3622019-05-03 15:49:14 +0100179inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100180{
Matthew Bentham912b3622019-05-03 15:49:14 +0100181 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
182 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
183 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100184}
185
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
203
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    switch (type)
    {
        case V1_3::OperandType::BOOL:
        case V1_3::OperandType::TENSOR_BOOL8:
        case V1_3::OperandType::TENSOR_FLOAT16:
        case V1_3::OperandType::TENSOR_FLOAT32:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
        case V1_3::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
222
// HAL 1.0 has no BOOL operand type, so this overload is always false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// A V1_0 operand is by definition not a 1.2-or-later operand.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
232
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True when the operand carries a scalar BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif
247
#ifdef ARMNN_ANDROID_NN_V1_3

// True when the operand carries a scalar BOOL value.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
262
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100263template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000264armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
265 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100266 armnn::TensorInfo reshapeInfo)
267{
268 armnn::ReshapeDescriptor reshapeDescriptor;
269 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
270
271 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100272 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273
274 // Attach the input layer to the reshape layer
275 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
276 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
277
278 return *reshapeLayer;
279}
280
// Connects two inputs to 'startLayer', inserting a reshape in front of the lower-rank
// input when the ranks differ (NumPy-style broadcast: the smaller shape is left-padded
// with 1s). Returns false if no backend supports the required reshape.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Left-pad the smaller shape with 1s so both shapes end up with the same rank.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
373
// Converts an AndroidNN padding scheme into explicit head/tail padding for one
// spatial dimension, given the input size, kernel size and stride.
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // numeric_cast throws if the computed padding were negative (it should not be for convolution).
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
387
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Overload for dilated convolutions: the effective kernel extent depends on the dilation factor.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Padding for transpose convolution is derived from the *output* size; note the
// signed out-parameters, unlike the convolution overloads above.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
407
Matthew Bentham912b3622019-05-03 15:49:14 +0100408Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100409{
410 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100411 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100412 shape.dimensions = operand.dimensions;
413 shape.scale = operand.scale;
414 shape.offset = operand.zeroPoint;
415 return shape;
416}
417
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Converts a HAL 1.2 operand into the android::nn::Shape used by the NN runtime helpers.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
431
#ifdef ARMNN_ANDROID_NN_V1_3

// Converts a HAL 1.3 operand into the android::nn::Shape used by the NN runtime helpers.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
445
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // The bias is quantized per-axis along the same dimension as the weights.
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Accept up to 1% relative difference and silently snap the bias scale to the expected value.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
485
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4-D tensor; used to legalize concat along dimension 2.
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (inverse of each other; used as a permute/un-permute pair)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100493
494template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000495armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
496 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100497{
498 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000499 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100500
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100501 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100502
503 // Connect input to swizzle layer
504 input.Connect(layer->GetInputSlot(0));
505
506 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000507 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100508 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
509
510 return *layer;
511}
512
arovir01b0717b52018-09-05 17:03:25 +0100513bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
514 const armnn::TensorShape & outputShape,
515 uint32_t concatDim)
516{
517 // Validate the output shape is correct given the input shapes (which have just been validated)
518 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
519 if (outputShape.GetNumDimensions() != numDimensions)
520 {
521 return Fail("%s: Output shape has wrong number of dimensions", __func__);
522 }
523
524 unsigned int outputSizeAlongConcatenatedDimension = 0;
525 for (unsigned int i = 0; i < inputShapes.size(); i++)
526 {
527 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
528 }
529
530 for (unsigned int i = 0; i < numDimensions; ++i)
531 {
532 if (i == concatDim)
533 {
534 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
535 {
536 return Fail(
537 "%s: Invalid output shape for dimension %d (%d != %d)",
538 __func__,
539 i,
540 outputShape[i],
541 outputSizeAlongConcatenatedDimension);
542 }
543 }
544 else
545 {
546 if (outputShape[i] != inputShapes[0][i])
547 {
548 return Fail("%s: Invalid output shape", __func__);
549 }
550 }
551 }
552
553 return true;
554}
555
// Concatenation via subtensors needs at least 3 dimensions; lower-rank inputs must be reshaped first.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
560
// Replaces each input handle with the output of a transpose layer applying 'mapping',
// updating 'inputShapes' in step. No-op when 'mapping' is the 4-D identity permutation.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
581
// Checks every input's transpose against backend support before actually inserting the
// transpose layers (via SwizzleInputs). Returns false if any transpose is unsupported.
// No-op (and true) when 'mapping' is the 4-D identity permutation.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
616
617
narpra01f176d5a2018-11-18 20:17:48 +0000618bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
619 int32_t & concatDimension,
620 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100621{
narpra01f176d5a2018-11-18 20:17:48 +0000622 bool needPermute = false;
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100623 ARMNN_ASSERT(numberOfDimensions >= 3);
arovir01b0717b52018-09-05 17:03:25 +0100624
625 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000626 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
627 // or along dimension 0 or 2 for a 3-D tensor.
628 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100629 {
narpra01f176d5a2018-11-18 20:17:48 +0000630 concatDimension = 1;
631 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
632 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100633 }
narpra01f176d5a2018-11-18 20:17:48 +0000634 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100635 {
narpra01f176d5a2018-11-18 20:17:48 +0000636 concatDimension = 0;
637 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
638 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100639 }
narpra01f176d5a2018-11-18 20:17:48 +0000640 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100641}
642
643} // anonymous namespace
644
645namespace armnn_driver
646{
647
//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
658
659} // namespace armnn_driver
660
661///
662/// Utility templates
663///
664
665namespace armnn_driver
666{
667
668using namespace android::nn;
669
// Returns a pointer to the operand referenced by the operation's input at 'inputIndex',
// or nullptr if the index is out of range (optionally logging the failure).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // Cast explicitly: feeding "%i" a uint32_t/size_t is formally undefined
            // behaviour in printf-style logging.
            Fail("%s: invalid input index: %i out of %i",
                 __func__, static_cast<int>(inputIndex), static_cast<int>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
692
// Returns a pointer to the operand referenced by the operation's output at 'outputIndex',
// or nullptr (with a log message) if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // Cast explicitly: feeding "%i" a uint32_t/size_t is formally undefined
        // behaviour in printf-style logging.
        Fail("%s: invalid output index: %i out of %i",
             __func__, static_cast<int>(outputIndex), static_cast<int>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
712
// Resolves the read-only address of an operand's constant data, either inside the
// model's inline operandValues (CONSTANT_COPY) or in a mapped memory pool
// (CONSTANT_REFERENCE). Returns nullptr for NO_VALUE (silently when 'optional')
// and for lifetimes whose value cannot be read at conversion time (with a log).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A missing non-optional value is treated like any other unreadable lifetime.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
759
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100760template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100761 typename HalOperation = typename HalPolicy::Operation,
762 typename HalModel = typename HalPolicy::Model,
763 typename HalOperandType = typename HalPolicy::OperandType>
764bool GetOperandType(const HalOperation& operation,
765 uint32_t inputIndex,
766 const HalModel& model,
767 HalOperandType& type)
768{
769 using HalOperand = typename HalPolicy::Operand;
770
771 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
772 if (!operand)
773 {
774 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
775 }
776
777 type = operand->type;
778 return true;
779}
780
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    // NO_VALUE is deliberately treated as "constant" so that absent optional
    // operands can travel down the constant-tensor path.
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
793
794template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100795 typename HalOperand = typename HalPolicy::Operand,
796 typename HalModel = typename HalPolicy::Model>
797ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
798 const HalModel& model,
799 const ConversionData& data,
800 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
801 const armnn::TensorShape* overrideTensorShape = nullptr,
802 bool optional = false)
803{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100804 if (!IsOperandTypeSupportedForTensors(operand.type))
805 {
806 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
807 return ConstTensorPin();
808 }
809
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000810 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100811 {
812 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
813 return ConstTensorPin();
814 }
815
816 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
817 if (!valueStart)
818 {
819 if (optional)
820 {
821 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
822 return ConstTensorPin(true);
823 }
824 // mandatory tensor with no values
825 Fail("%s: failed to get operand address", __func__);
826 return ConstTensorPin();
827 }
828
829 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Teresa Charlin02dce092019-11-11 17:06:23 +0000830 // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
831 if (tensorInfo.HasPerAxisQuantization())
832 {
833 tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
834 }
835
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100836 if (overrideTensorShape != nullptr)
837 {
838 tensorInfo.SetShape(*overrideTensorShape);
839 }
840 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
841}
842
843template<typename HalPolicy,
844 typename HalOperation = typename HalPolicy::Operation,
845 typename HalModel = typename HalPolicy::Model>
846ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
847 uint32_t inputIndex,
848 const HalModel& model,
849 const ConversionData& data,
850 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
851 const armnn::TensorShape* overrideTensorShape = nullptr,
852 bool optional = false)
853{
854 using HalOperand = typename HalPolicy::Operand;
855
856 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
857 if (!operand)
858 {
859 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
860 return ConstTensorPin();
861 }
862 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
863 model,
864 data,
865 dimensionMappings,
866 overrideTensorShape,
867 optional);
868}
869
870template<typename HalPolicy,
871 typename OutputType,
872 typename HalOperandType = typename HalPolicy::OperandType,
873 typename HalOperation = typename HalPolicy::Operation,
874 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100875bool GetInputScalar(const HalOperation& operation,
876 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100877 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100878 OutputType& outValue,
879 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100880 const ConversionData& data,
881 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100882{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100883 using HalOperand = typename HalPolicy::Operand;
884
885 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100886 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100887 {
888 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
889 }
890
Sadik Armagan813f2302020-05-19 14:10:30 +0100891 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100892 {
893 return Fail("%s: unexpected operand type: %s (should be %s)",
894 __func__, toString(operand->type).c_str(), toString(type).c_str());
895 }
896
Sadik Armagan813f2302020-05-19 14:10:30 +0100897 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100898 {
899 return Fail("%s: incorrect operand location length: %i (should be %i)",
900 __func__, operand->location.length, sizeof(OutputType));
901 }
902
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100903 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100904 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100905 {
906 return Fail("%s: failed to get address for operand", __func__);
907 }
908
Sadik Armagan813f2302020-05-19 14:10:30 +0100909 if(!optional)
910 {
911 outValue = *(static_cast<const OutputType*>(valueAddress));
912 }
913
arovir01b0717b52018-09-05 17:03:25 +0100914 return true;
915}
916
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100917template<typename HalPolicy,
918 typename HalOperation = typename HalPolicy::Operation,
919 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100920bool GetInputInt32(const HalOperation& operation,
921 uint32_t inputIndex,
922 int32_t& outValue,
923 const HalModel& model,
924 const ConversionData& data)
925{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100926 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100927}
928
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100929template<typename HalPolicy,
930 typename HalOperation = typename HalPolicy::Operation,
931 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100932bool GetInputFloat32(const HalOperation& operation,
933 uint32_t inputIndex,
934 float& outValue,
935 const HalModel& model,
936 const ConversionData& data)
937{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100938 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100939}
940
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100941template<typename HalPolicy,
942 typename HalOperation = typename HalPolicy::Operation,
943 typename HalOperandType = typename HalPolicy::OperandType,
944 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100945bool GetInputActivationFunctionImpl(const HalOperation& operation,
946 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100947 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100948 ActivationFn& outActivationFunction,
949 const HalModel& model,
950 const ConversionData& data)
951{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100952 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100953 {
954 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
955 __func__,
956 toString(type).c_str(),
957 toString(OperandType::INT32).c_str(),
958 toString(OperandType::TENSOR_INT32).c_str());
959 }
960
961 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100962 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100963 {
964 return Fail("%s: failed to get activation input value", __func__);
965 }
966 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
967 return true;
968}
969
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100970template<typename HalPolicy,
971 typename HalOperation = typename HalPolicy::Operation,
972 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100973bool GetInputActivationFunction(const HalOperation& operation,
974 uint32_t inputIndex,
975 ActivationFn& outActivationFunction,
976 const HalModel& model,
977 const ConversionData& data)
978{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100979 return GetInputActivationFunctionImpl<HalPolicy>(operation,
980 inputIndex,
981 HalPolicy::OperandType::INT32,
982 outActivationFunction,
983 model,
984 data);
arovir01b0717b52018-09-05 17:03:25 +0100985}
986
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100987template<typename HalPolicy,
988 typename HalOperation = typename HalPolicy::Operation,
989 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100990bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
991 uint32_t inputIndex,
992 ActivationFn& outActivationFunction,
993 const HalModel& model,
994 const ConversionData& data)
995{
996 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100997 return GetInputActivationFunctionImpl<HalPolicy>(operation,
998 inputIndex,
999 HalPolicy::OperandType::INT32,
1000 outActivationFunction,
1001 model,
1002 data);
arovir01b0717b52018-09-05 17:03:25 +01001003}
1004
1005
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001006template<typename HalPolicy,
1007 typename HalOperation = typename HalPolicy::Operation,
1008 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001009bool GetOptionalInputActivation(const HalOperation& operation,
1010 uint32_t inputIndex,
1011 ActivationFn& activationFunction,
1012 const HalModel& model,
1013 const ConversionData& data)
1014{
1015 if (operation.inputs.size() <= inputIndex)
1016 {
1017 activationFunction = ActivationFn::kActivationNone;
1018 }
1019 else
1020 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001021 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001022 {
1023 return Fail("%s: Operation has invalid inputs", __func__);
1024 }
1025 }
1026 return true;
1027}
1028
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001029template<typename HalPolicy,
1030 typename ConvolutionDescriptor,
1031 typename HalOperation = typename HalPolicy::Operation,
1032 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001033bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1034 uint32_t dilationXIndex,
1035 ConvolutionDescriptor& descriptor,
1036 const HalModel& model,
1037 const ConversionData& data)
1038{
1039 bool success = true;
1040 if (operation.inputs.size() >= dilationXIndex + 2)
1041 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001042 success &= GetInputScalar<HalPolicy>(operation,
1043 dilationXIndex,
1044 HalPolicy::OperandType::INT32,
1045 descriptor.m_DilationX,
1046 model,
1047 data);
1048 success &= GetInputScalar<HalPolicy>(operation,
1049 dilationXIndex + 1,
1050 HalPolicy::OperandType::INT32,
1051 descriptor.m_DilationY,
1052 model,
1053 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001054 }
1055
1056 return success;
1057}
1058
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001059template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001060 typename HalOperation = typename HalPolicy::Operation,
1061 typename HalModel = typename HalPolicy::Model>
1062bool GetOptionalBool(const HalOperation& operation,
1063 uint32_t inputIndex,
1064 const HalModel& model,
1065 const ConversionData& data)
1066{
1067 using HalOperand = typename HalPolicy::Operand;
1068
1069 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1070 if (!operand)
1071 {
1072 return false;
1073 }
1074
1075 if (!IsBool(*operand))
1076 {
1077 return false;
1078 }
1079
1080 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1081 if (!valueAddress)
1082 {
1083 return false;
1084 }
1085
1086 if (*(static_cast<const bool*>(valueAddress)))
1087 {
1088 return true;
1089 }
1090 else
1091 {
1092 return false;
1093 }
1094}
1095
1096template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001097 typename HalOperand = typename HalPolicy::Operand,
1098 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001099bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001100 std::vector<int32_t>& outValues,
1101 const HalModel& model,
1102 const ConversionData& data)
1103{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001104 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001105 {
1106 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1107 }
1108
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001109 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001110 if (!startAddress)
1111 {
1112 return Fail("%s: failed to get operand address", __func__, operand.type);
1113 }
1114
1115 // Check number of bytes is sensible
1116 const uint32_t numBytes = operand.location.length;
1117 if (numBytes % sizeof(int32_t) != 0)
1118 {
1119 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1120 __func__, numBytes, sizeof(int32_t));
1121 }
1122
1123 outValues.resize(numBytes / sizeof(int32_t));
1124 memcpy(outValues.data(), startAddress, numBytes);
1125 return true;
1126}
1127
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001128template<typename HalPolicy,
1129 typename HalOperation = typename HalPolicy::Operation,
1130 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001131bool GetInputPaddingScheme(const HalOperation& operation,
1132 uint32_t inputIndex,
1133 PaddingScheme& outPaddingScheme,
1134 const HalModel& model,
1135 const ConversionData& data)
1136{
1137 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001138 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001139 {
1140 return Fail("%s: failed to get padding scheme input value", __func__);
1141 }
1142
1143 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1144 return true;
1145}
1146
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
/// Resolves an operation input operand to a LayerInputHandle wrapping the ArmNN
/// output slot that produces it.
///
/// - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT operands are mapped to the
///   output slot previously recorded in data.m_OutputSlotForOperand.
/// - CONSTANT_COPY / CONSTANT_REFERENCE operands are materialised as a new
///   ArmNN Constant layer (subject to backend support).
/// Any failure (missing operand, unsupported type, dynamic shape, unsupported
/// lifetime) returns an invalid LayerInputHandle.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1259
Kevin May42477c12020-03-26 13:34:14 +00001260
#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
/// NNAPI 1.3 overload of ConvertToLayerInputHandle.
///
/// Identical in structure to the generic overload above, but keyed on the
/// V1_3 lifetimes: SUBGRAPH_INPUT / SUBGRAPH_OUTPUT replace the pre-1.3
/// MODEL_INPUT / MODEL_OUTPUT names.
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1373
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
/// Associates an operation output operand with a layer's output slot and
/// records the mapping in data.m_OutputSlotForOperand so later operations can
/// consume it. The slot's TensorInfo is taken from the operand unless
/// overrideOutputInfo is supplied.
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    // NOTE(review): the bounds check below uses operationOutputIndex, while the
    // slot accessed afterwards is layerOutputIndex — presumably callers always
    // pass matching indices; verify before relying on divergent values.
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    // Record the slot so later operations can look up this operand as an input.
    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    return true;
}
1409
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001410template<typename HalPolicy,
1411 typename HalOperation = typename HalPolicy::Operation,
1412 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001413armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1414 uint32_t inputIndex,
1415 const HalModel& model,
1416 ConversionData& data)
1417{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001418 using HalOperand = typename HalPolicy::Operand;
1419
1420 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001421 if (!operand)
1422 {
1423 return armnn::DataLayout::NHWC;
1424 }
1425
1426 if (!IsBool(*operand))
1427 {
1428 return armnn::DataLayout::NHWC;
1429 }
1430
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001431 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001432 if (!valueAddress)
1433 {
1434 return armnn::DataLayout::NHWC;
1435 }
1436
1437 if (*(static_cast<const bool*>(valueAddress)))
1438 {
1439 return armnn::DataLayout::NCHW;
1440 }
1441 else
1442 {
1443 return armnn::DataLayout::NHWC;
1444 }
1445}
1446
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001447template<typename HalPolicy,
1448 typename HalOperation = typename HalPolicy::Operation,
1449 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001450bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1451 uint32_t outputIndex,
1452 armnn::IConnectableLayer& layer,
1453 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001454 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001455{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001456 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1457 outputIndex,
1458 layer,
1459 outputIndex,
1460 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001461 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001462}
1463
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
/// Shared conversion path for the unary activation operations (RELU, RELU1,
/// RELU6, TANH, ...): validates input/output, queries backend support, then
/// adds an ArmNN Activation layer configured by activationDesc.
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Reject the conversion unless at least one backend supports this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Register the layer's output slot so downstream operations can consume it.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1512
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001513template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001514 typename HalOperation = typename HalPolicy::Operation,
1515 typename HalModel = typename HalPolicy::Model>
1516bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1517{
1518 armnn::ActivationDescriptor desc;
1519 desc.m_Function = armnn::ActivationFunction::ReLu;
1520
1521 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1522}
1523
1524template<typename HalPolicy,
1525 typename HalOperation = typename HalPolicy::Operation,
1526 typename HalModel = typename HalPolicy::Model>
1527bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1528{
1529 armnn::ActivationDescriptor desc;
1530 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1531 desc.m_A = 1.0f;
1532 desc.m_B = -1.0f;
1533
1534 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1535}
1536
1537template<typename HalPolicy,
1538 typename HalOperation = typename HalPolicy::Operation,
1539 typename HalModel = typename HalPolicy::Model>
1540bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1541{
1542 armnn::ActivationDescriptor desc;
1543 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1544 desc.m_A = 6.0f;
1545
1546 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1547}
1548
1549template<typename HalPolicy,
1550 typename HalOperation = typename HalPolicy::Operation,
1551 typename HalModel = typename HalPolicy::Model>
1552bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1553{
1554 armnn::ActivationDescriptor desc;
1555 desc.m_Function = armnn::ActivationFunction::TanH;
1556 desc.m_A = 1.0f; // android nn does not support tanH parameters
1557 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1558
1559 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1560}
1561
1562template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001563 typename HalOperation = typename HalPolicy::Operation,
1564 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001565bool ConvertPaddings(const HalOperation& operation,
1566 const HalModel& model,
1567 ConversionData& data,
1568 unsigned int rank,
1569 armnn::PadDescriptor& padDescriptor)
1570{
1571 using HalOperand = typename HalPolicy::Operand;
1572
1573 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1574 if (!paddingsOperand)
1575 {
1576 return Fail("%s: Could not read paddings operand", __func__);
1577 }
1578
1579 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1580 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1581 {
1582 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1583 }
1584
1585 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001586 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1587 {
1588 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1589 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001590
1591 // add padding for each dimension of input tensor.
1592 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1593 {
1594 int paddingBeforeInput = paddings[i];
1595 int paddingAfterInput = paddings[i + 1];
1596
1597 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1598 {
1599 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1600 }
1601
1602 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1603 }
1604
1605 return true;
1606}
1607
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN 2-D pooling operation (AVERAGE/L2/MAX, selected by
/// @p poolType) into an ArmNN Pooling2d layer followed by an optional fused
/// activation. Supports both HAL signatures: explicit padding (>= 10 inputs)
/// and implicit padding scheme (7 or fewer inputs).
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Default layout; may be overridden below by the optional layout operand
    // on HAL 1.2+ models.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // Explicit-padding signature:
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ adds an optional data-layout operand at index 10.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // Implicit-padding signature:
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Optional data-layout operand sits at index 7 in this signature.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Layout must be resolved *before* the padding is computed, since the
        // width/height indices depend on it.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        // Derive the explicit padding values from the implicit scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Ask the configured backends whether they can run this pooling.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer pass-through when ActivationFn is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1726
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001727template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001728 typename HalOperation = typename HalPolicy::Operation,
1729 typename HalModel = typename HalPolicy::Model>
1730bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001731{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001732 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001733
1734 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1735 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1736
1737 if (!input0.IsValid() || !input1.IsValid())
1738 {
1739 return Fail("%s: Operation has invalid inputs", __func__);
1740 }
1741
1742 // The FuseActivation parameter is always the input index 2
1743 // and it should be optional
1744 ActivationFn activationFunction;
1745 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1746 {
1747 return Fail("%s: Operation has invalid inputs", __func__);
1748 }
1749
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001750 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001751 if (!outputOperand)
1752 {
1753 return false;
1754 }
1755
1756 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1757 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1758
1759 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1760 if (IsDynamicTensor(outputInfo))
1761 {
1762 return Fail("%s: Dynamic output tensors are not supported", __func__);
1763 }
1764
1765 bool isSupported = false;
1766 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1767 IsAdditionSupported,
1768 data.m_Backends,
1769 isSupported,
1770 inputInfo0,
1771 inputInfo1,
1772 outputInfo);
1773 if (!isSupported)
1774 {
1775 return false;
1776 }
1777
1778 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1779 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1780
1781 if (endLayer != nullptr)
1782 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001783 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001784 if (!isReshapeSupported)
1785 {
1786 return false;
1787 }
1788
Mike Kelly46272802019-08-14 17:00:48 +01001789 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1790 }
1791 else
1792 {
1793 return Fail("%s: ProcessActivation failed", __func__);
1794 }
1795}
1796
1797template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001798 typename HalOperation = typename HalPolicy::Operation,
1799 typename HalModel = typename HalPolicy::Model>
1800bool ConvertArgMinMax(const HalOperation& operation,
1801 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001802 ConversionData& data,
1803 armnn::ArgMinMaxFunction argMinMaxFunction)
1804{
1805 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1806
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001807 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001808 using HalOperandType = typename HalPolicy::OperandType;
1809
1810 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1811
1812 if (!input0.IsValid())
1813 {
1814 return Fail("%s: Operation has invalid inputs", __func__);
1815 }
1816
1817 int32_t axis;
1818 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1819 {
1820 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1821 }
1822
1823 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1824 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1825
1826 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1827 {
1828 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1829 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1830 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1831 return Fail("%s: Axis must be in range [-n, n)", __func__);
1832 }
1833
1834 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1835 if (!output)
1836 {
1837 return Fail("%s: Could not read output 0", __func__);
1838 }
1839
1840 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1841
1842 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1843 if (IsDynamicTensor(outputInfo))
1844 {
1845 return Fail("%s: Dynamic output tensors are not supported", __func__);
1846 }
1847
1848 armnn::ArgMinMaxDescriptor descriptor;
1849 descriptor.m_Function = argMinMaxFunction;
1850 descriptor.m_Axis = axis;
1851
1852 bool isSupported = false;
1853 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1854 IsArgMinMaxSupported,
1855 data.m_Backends,
1856 isSupported,
1857 inputInfo0,
1858 outputInfo,
1859 descriptor);
1860 if (!isSupported)
1861 {
1862 return false;
1863 }
1864
1865 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1866 assert(layer != nullptr);
1867
1868 input0.Connect(layer->GetInputSlot(0));
1869
1870 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1871}
1872
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN CONCATENATION operation into an ArmNN Concat layer.
///
/// The conversion may wrap the concat with extra layers:
///  - Reshape layers before it, when any input has rank < 3 (ArmNN concat here
///    works on 3D/4D shapes), plus a matching reshape after it to restore the
///    original rank;
///  - Transpose (swizzle/deswizzle) layers around it, when the concat axis is
///    one ArmNN's concat descriptor cannot express directly.
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether low-rank inputs were expanded, and by how many leading
    // 1-dimensions, so the expansion can be undone after the concat.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // The helper reshape must itself be supported by a backend.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
            CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
                armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather the (possibly swizzled/reshaped) input infos for the support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Deswizzle: apply the inverse permutation so the final output matches
        // the layout the model expects.
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
                                                                     layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2163
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer
/// with constant weights/bias and an optional fused activation.
/// Supports the explicit-padding HAL signature (10 inputs) and the
/// implicit-padding one (7 inputs).
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input-scale * weight-scale, as
    // required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: pad l/r/t/b at indices 3-6, strides at 7-8,
        // fused activation at 9.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding scheme at index 3, strides at 4-5, fused activation at 6.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Indices assume NHWC activations and OHWI weights (kernel height at
        // dim 1, width at dim 2) — the NNAPI CONV_2D layout.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they can run this convolution.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (pass-through when ActivationFn is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2283
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002284template<typename HalPolicy,
2285 typename HalOperation = typename HalPolicy::Operation,
2286 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002287bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2288{
2289 using HalOperand = typename HalPolicy::Operand;
2290 using HalOperandType = typename HalPolicy::OperandType;
2291
2292 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2293 if (!input.IsValid() )
2294 {
2295 return Fail("%s: Operation has invalid inputs", __func__);
2296 }
2297
2298 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2299 unsigned int rank = inputInfo.GetNumDimensions();
2300 if (rank != 4)
2301 {
2302 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2303 }
2304
2305 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2306 if (!output)
2307 {
2308 return Fail("%s: Could not read output 0", __func__);
2309 }
2310
2311 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2312 if (IsDynamicTensor(outputInfo))
2313 {
2314 return Fail("%s: Dynamic output tensors are not supported", __func__);
2315 }
2316
2317 armnn::DepthToSpaceDescriptor descriptor;
2318
2319 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2320 if (descriptor.m_BlockSize <= 1)
2321 {
2322 return Fail("%s: Block size must be at least 1 in all dimensions");
2323 }
2324
2325 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002326 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002327 {
2328 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2329 }
2330
2331 bool isSupported = false;
2332 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2333 IsDepthToSpaceSupported,
2334 data.m_Backends,
2335 isSupported,
2336 inputInfo,
2337 outputInfo,
2338 descriptor);
2339 if (!isSupported)
2340 {
2341 return false;
2342 }
2343
2344 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2345 assert(layer != nullptr);
2346 input.Connect(layer->GetInputSlot(0));
2347
2348 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2349}
2350
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer (NHWC input, constant weights and bias).
/// Supports both the explicit-padding form (11 inputs) and the
/// padding-scheme form (8 inputs); fused activation is appended afterwards.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I is taken from the input's channel dimension (NHWC index 3); M is the
    // depth multiplier, recovered as (I * M) / I from the AndroidNN weight shape.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Aligns the bias quantization scale with input scale * weight scale if they drift apart.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // 11-input variant: explicit left/right/top/bottom padding plus strides.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8-input variant: implicit padding scheme (SAME/VALID); padding is derived below.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are in [ M, I, H, W ] after the swizzle above, so W = dim 3, H = dim 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        // Input is NHWC, so W = dim 2, H = dim 1.
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2495
Mike Kelly3c673942019-07-25 09:26:06 +01002496template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002497 typename HalOperation = typename HalPolicy::Operation,
2498 typename HalModel = typename HalPolicy::Model>
2499bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002500{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002501 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002502
2503 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2504 if (!input.IsValid())
2505 {
2506 return Fail("%s: Operation has invalid input", __func__);
2507 }
2508
Sadik Armagan98c0f662019-11-21 15:54:36 +00002509 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2510 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2511 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2512 {
2513 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2514 }
2515
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002516 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002517 if (!outputOperand)
2518 {
2519 return Fail("%s: Operation has invalid outputs", __func__);
2520 }
2521
2522 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2523 if (IsDynamicTensor(outputInfo))
2524 {
2525 return Fail("%s: Dynamic output tensors are not supported", __func__);
2526 }
2527
2528 bool isSupported = false;
2529 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2530 IsDequantizeSupported,
2531 data.m_Backends,
2532 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002533 inputInfo,
2534 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002535 if (!isSupported)
2536 {
2537 return false;
2538 }
2539
2540 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2541 assert(layer != nullptr);
2542 input.Connect(layer->GetInputSlot(0));
2543
2544 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2545}
2546
2547template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002548 typename HalOperation = typename HalPolicy::Operation,
2549 typename HalModel = typename HalPolicy::Model>
2550bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002551{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002552 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002553
2554 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2555 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2556
2557 if (!input0.IsValid() || !input1.IsValid())
2558 {
2559 return Fail("%s: Operation has invalid inputs", __func__);
2560 }
2561
2562 // The FuseActivation parameter is always the input index 2
2563 // and it should be optional
2564 ActivationFn activationFunction;
2565 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2566 {
2567 return Fail("%s: Operation has invalid inputs", __func__);
2568 }
2569
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002570 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002571 if (!output)
2572 {
2573 return Fail("%s: Could not read output 0", __func__);
2574 }
2575
2576 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2577 if (IsDynamicTensor(outputInfo))
2578 {
2579 return Fail("%s: Dynamic output tensors are not supported", __func__);
2580 }
2581
2582 bool isSupported = false;
2583 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2584 IsDivisionSupported,
2585 data.m_Backends,
2586 isSupported,
2587 input0.GetTensorInfo(),
2588 input1.GetTensorInfo(),
2589 outputInfo);
2590 if (!isSupported)
2591 {
2592 return false;
2593 }
2594
2595 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2596 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2597
2598 if (endLayer)
2599 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002600 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002601 if (!isReshapeSupported)
2602 {
2603 return false;
2604 }
2605
Mike Kelly46272802019-08-14 17:00:48 +01002606 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2607 }
2608 return Fail("%s: ProcessActivation failed", __func__);
2609}
2610
2611template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002612 typename HalOperation = typename HalPolicy::Operation,
2613 typename HalModel = typename HalPolicy::Model>
2614bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002615{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002616 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002617
2618 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2619 if (!input.IsValid())
2620 {
2621 return Fail("%s: Operation has invalid inputs", __func__);
2622 }
2623
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002624 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002625 if (!outputOperand)
2626 {
2627 return Fail("%s: Operation has invalid outputs", __func__);
2628 }
2629
2630 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2631 if (IsDynamicTensor(outputInfo))
2632 {
2633 return Fail("%s: Dynamic output tensors are not supported", __func__);
2634 }
2635
2636 bool isSupported = false;
2637 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2638 IsFloorSupported,
2639 data.m_Backends,
2640 isSupported,
2641 input.GetTensorInfo(),
2642 outputInfo);
2643 if (!isSupported)
2644 {
2645 return false;
2646 }
2647
2648 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2649 assert(layer != nullptr);
2650 input.Connect(layer->GetInputSlot(0));
2651
2652 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2653}
2654
/// Returns true if the operand's type is TENSOR_QUANT8_SYMM.
/// HAL 1.0 has no QSYMM8 tensor type, so this overload is always false.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

// QSYMM8 was introduced with the NN HAL 1.2 operand types.
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

// Same check for the HAL 1.3 operand type enumeration.
#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2677
/// Outcome of DequantizeIfRequired: the weights were dequantized (SUCCESS),
/// were already constant so no dequantization was needed (NOT_REQUIRED),
/// or the weights operand could not be read at all (INVALID_OPERAND).
enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

/// Buffer of dequantized float data, its size in bytes, the tensor info
/// describing it, and the status of the dequantization attempt.
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2686
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002687template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002688 typename HalOperation = typename HalPolicy::Operation,
2689 typename HalModel = typename HalPolicy::Model>
2690DequantizeResult DequantizeIfRequired(size_t operand_index,
2691 const HalOperation& operation,
2692 const HalModel& model,
2693 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002694{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002695 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002696
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002697 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002698 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002699 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002700 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002701 }
2702
2703 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2704 {
2705 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002706 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002707 }
2708
2709 const size_t weightsInputIndex = operation.inputs[operand_index];
2710
2711 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2712 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002713 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002714 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002715 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002716 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002717 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2718 {
2719 continue;
2720 }
2721
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002722 size_t outOpIndex = weightsInputIndex + 1;
2723 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002724 {
2725 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002726 }
2727
2728 if (outOpIndex != weightsInputIndex)
2729 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002730 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002731 }
2732
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002733 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002734 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002735
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002736 if (!IsQSymm8(*operand))
2737 {
2738 // Only supporting dequantize from QSYMM8 to FLOAT
2739 break;
2740 }
2741
2742 // Allocate a new buffer for the dequantized data and manually dequantize
2743 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2744 if (!startValue)
2745 {
2746 // Failed to get the operand address
2747 break;
2748 }
2749
2750 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2751 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002752 const float quantizationScale = operand->scale;
2753
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002754 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2755 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2756 {
2757 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002758 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002759 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2760 }
2761
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002762 // Construct tensor info for dequantized ConstTensor
2763 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2764 operand->dimensions.data(),
2765 armnn::DataType::Float32);
2766
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002767 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2768 std::move(tensorInfo),
2769 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002770 }
2771
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002772 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002773}
2774
2775template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002776 typename HalOperation = typename HalPolicy::Operation,
2777 typename HalModel = typename HalPolicy::Model>
2778ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2779 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002780 const ConversionData& data,
2781 size_t operandIndex,
2782 bool optional = false)
2783{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002784 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2785
2786 DequantizeStatus status = std::get<3>(dequantized);
2787 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002788 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002789 case DequantizeStatus::INVALID_OPERAND:
2790 {
2791 // return invalid const tensor pin
2792 return ConstTensorPin();
2793 }
2794 case DequantizeStatus::NOT_REQUIRED:
2795 {
2796 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2797 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2798 }
2799 case DequantizeStatus::SUCCESS:
2800 default:
2801 {
2802 return ConstTensorPin(
2803 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2804 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002805 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002806}
2807
2808
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN FULLY_CONNECTED operation into an ArmNN
/// FullyConnected layer. Weights may come from a constant operand or be
/// recovered by dequantizing a constant-QSYMM8 DEQUANTIZE producer; inputs
/// with rank > 2 are flattened via an inserted Reshape layer. A fused
/// activation (input index 3) is appended after the layer.
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights (input 1) may need dequantizing; bias (input 2) is a plain 1D constant.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to the 2D form expected by FullyConnected;
    // FlattenFullyConnectedInput throws when the shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Validate that flattened input, weights and expected output shapes are mutually consistent.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape between the input and the FullyConnected layer
            // so the network sees the flattened 2D shape computed above.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2927
2928template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002929 typename HalOperation = typename HalPolicy::Operation,
2930 typename HalModel = typename HalPolicy::Model>
2931bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002932{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002933 using HalOperand = typename HalPolicy::Operand;
2934
Mike Kelly999e2092019-08-15 10:46:46 +01002935 if (operation.inputs.size() != 1)
2936 {
2937 return Fail("%s: Optional inputs are not supported", __func__);
2938 }
2939
Mike Kelly46272802019-08-14 17:00:48 +01002940 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2941 if (!input.IsValid())
2942 {
2943 return Fail("%s: Operation has invalid inputs", __func__);
2944 }
2945
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002946 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002947 if (!output)
2948 {
2949 return Fail("%s: Could not read output 0", __func__);
2950 }
2951
2952 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2953 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2954
2955 if (IsDynamicTensor(outputInfo))
2956 {
2957 return Fail("%s: Dynamic output tensors are not supported", __func__);
2958 }
2959 if (outputInfo.GetNumDimensions() != 4u)
2960 {
2961 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2962 }
2963
2964 armnn::L2NormalizationDescriptor desc;
2965 desc.m_DataLayout = armnn::DataLayout::NHWC;
2966
2967 bool isSupported = false;
2968 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2969 IsL2NormalizationSupported,
2970 data.m_Backends,
2971 isSupported,
2972 inputInfo,
2973 outputInfo,
2974 desc);
2975 if (!isSupported)
2976 {
2977 return false;
2978 }
2979
2980 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2981 assert(layer != nullptr);
2982 input.Connect(layer->GetInputSlot(0));
2983
2984 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2985}
2986
2987template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002988 typename HalOperation = typename HalPolicy::Operation,
2989 typename HalModel = typename HalPolicy::Model>
2990bool ConvertLocalResponseNormalization(const HalOperation& operation,
2991 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01002992 ConversionData& data)
2993{
Mike Kelly999e2092019-08-15 10:46:46 +01002994 if (operation.inputs.size() != 5)
2995 {
2996 return Fail("%s: Optional inputs are not supported", __func__);
2997 }
2998
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002999 using HalOperand = typename HalPolicy::Operand;
3000 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003001
3002 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3003 if (!input.IsValid())
3004 {
3005 return Fail("%s: Operation has invalid inputs", __func__);
3006 }
3007
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003008 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003009 if (!output)
3010 {
3011 return Fail("%s: Could not read output 0", __func__);
3012 }
3013
3014 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3015 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3016
3017 if (IsDynamicTensor(outputInfo))
3018 {
3019 return Fail("%s: Dynamic output tensors are not supported", __func__);
3020 }
3021 if (outputInfo.GetNumDimensions() != 4u)
3022 {
3023 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3024 }
3025
3026 armnn::NormalizationDescriptor descriptor;
3027 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3028 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3029 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3030
3031 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003032 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003033 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3034 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3035 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3036 {
3037 return Fail("%s: Operation has invalid inputs", __func__);
3038 }
3039
3040 // ArmNN expects normSize to be the full size of the normalization
3041 // window rather than the radius as in AndroidNN.
3042 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3043
3044 bool isSupported = false;
3045 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3046 IsNormalizationSupported,
3047 data.m_Backends,
3048 isSupported,
3049 inputInfo,
3050 outputInfo,
3051 descriptor);
3052 if (!isSupported)
3053 {
3054 return false;
3055 }
3056
3057
3058 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3059 assert(layer != nullptr);
3060 input.Connect(layer->GetInputSlot(0));
3061
3062 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3063}
3064
3065template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003066 typename HalOperation = typename HalPolicy::Operation,
3067 typename HalModel = typename HalPolicy::Model>
3068bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003069{
Mike Kelly46272802019-08-14 17:00:48 +01003070 armnn::ActivationDescriptor desc;
3071 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3072
3073 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3074}
3075
3076template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003077 typename HalOperation = typename HalPolicy::Operation,
3078 typename HalModel = typename HalPolicy::Model>
3079bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003080{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003081 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003082
3083 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3084 if (!input.IsValid())
3085 {
3086 return Fail("%s: Operation has invalid inputs", __func__);
3087 }
3088
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003089 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003090 if (!output)
3091 {
3092 return Fail("%s: Could not read output 0", __func__);
3093 }
3094
3095 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3096 if (IsDynamicTensor(outputInfo))
3097 {
3098 return Fail("%s: Dynamic output tensors are not supported", __func__);
3099 }
3100
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003101 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003102 if (!axisOperand)
3103 {
3104 return Fail("%s: Could not read input 1", __func__);
3105 }
3106
3107 std::vector<int32_t> axis;
3108 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3109 {
3110 return Fail("%s: Input 1 has invalid values", __func__);
3111 }
3112
3113 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3114
3115 // Convert the axis to unsigned int and remove duplicates.
3116 unsigned int rank = inputInfo.GetNumDimensions();
3117 std::set<unsigned int> uniqueAxis;
3118 std::transform(axis.begin(), axis.end(),
3119 std::inserter(uniqueAxis, uniqueAxis.begin()),
3120 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3121
3122 // Get the "keep dims" flag.
3123 int32_t keepDims = 0;
3124 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3125 {
3126 return Fail("%s: Could not read input 2", __func__);
3127 }
3128
3129 armnn::MeanDescriptor descriptor;
3130 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3131 descriptor.m_KeepDims = keepDims > 0;
3132
3133 bool isSupported = false;
3134 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3135 IsMeanSupported,
3136 data.m_Backends,
3137 isSupported,
3138 inputInfo,
3139 outputInfo,
3140 descriptor);
3141 if (!isSupported)
3142 {
3143 return false;
3144 }
3145
3146 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3147 assert(layer != nullptr);
3148 input.Connect(layer->GetInputSlot(0));
3149
3150 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3151}
3152
3153template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003154 typename HalOperation = typename HalPolicy::Operation,
3155 typename HalModel = typename HalPolicy::Model>
3156bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003157{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003158 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003159
3160 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3161 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3162
3163 if (!input0.IsValid() || !input1.IsValid())
3164 {
3165 return Fail("%s: Operation has invalid inputs", __func__);
3166 }
3167
3168 // The FuseActivation parameter is always the input index 2
3169 // and it should be optional
3170 ActivationFn activationFunction;
3171 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3172 {
3173 return Fail("%s: Operation has invalid inputs", __func__);
3174 }
3175
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003176 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003177
3178 if (outputOperand == nullptr)
3179 {
3180 return false;
3181 }
3182
3183 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3184 if (IsDynamicTensor(outputInfo))
3185 {
3186 return Fail("%s: Dynamic output tensors are not supported", __func__);
3187 }
3188
3189 bool isSupported = false;
3190 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3191 IsMultiplicationSupported,
3192 data.m_Backends,
3193 isSupported,
3194 input0.GetTensorInfo(),
3195 input1.GetTensorInfo(),
3196 outputInfo);
3197 if (!isSupported)
3198 {
3199 return false;
3200 }
3201
3202 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3203 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3204
3205 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3206 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3207
3208 if (endLayer != nullptr)
3209 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003210 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003211 if (!isReshapeSupported)
3212 {
3213 return false;
3214 }
3215
Mike Kelly46272802019-08-14 17:00:48 +01003216 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3217 }
3218 else
3219 {
3220 return Fail("%s: ProcessActivation failed", __func__);
3221 }
3222}
3223
3224template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003225 typename HalOperation = typename HalPolicy::Operation,
3226 typename HalModel = typename HalPolicy::Model>
3227bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003228{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003229 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003230
Mike Kelly3c673942019-07-25 09:26:06 +01003231 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3232 if (!input.IsValid())
3233 {
3234 return Fail("%s: Operation has invalid inputs", __func__);
3235 }
3236
3237 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3238 unsigned int rank = inputInfo.GetNumDimensions();
3239
3240 armnn::PadDescriptor descriptor;
3241 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3242 {
3243 return Fail("%s: Could not convert paddings", __func__);
3244 }
3245
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003246 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3247 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003248 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3249 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3250 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003251 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003252 {
3253 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3254 }
3255
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003256 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003257 if (!output)
3258 {
3259 return Fail("%s: Could not read output", __func__);
3260 }
3261
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003262 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003263 if (IsDynamicTensor(outputInfo))
3264 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003265 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003266 }
3267
3268 bool isSupported = false;
3269 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3270 IsPadSupported,
3271 data.m_Backends,
3272 isSupported,
3273 inputInfo,
3274 outputInfo,
3275 descriptor);
3276 if (!isSupported)
3277 {
3278 return false;
3279 }
3280
3281 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3282 assert(layer != nullptr);
3283 input.Connect(layer->GetInputSlot(0));
3284 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3285
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003286 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003287}
3288
// Converts an Android NN RESHAPE operation into an ArmNN Reshape layer.
// Input 1 is a 1-D INT32 tensor holding the requested shape, which may
// contain the special value -1 (resolved via the AndroidNN reshapePrepare
// utility). The resolved shape must match the declared output operand shape.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The requested shape must be a rank-1 tensor (a list of dimensions).
    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    // The declared output operand must agree with the resolved shape.
    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    // Ask the backends whether this reshape is supported before adding the layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               GetTensorInfoForOperand(*outputOperand),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3364
3365template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003366 typename HalOperation = typename HalPolicy::Operation,
3367 typename HalModel = typename HalPolicy::Model>
3368bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003369{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003370 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003371
Mike Kelly0a879362019-07-29 16:56:31 +01003372 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3373 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3374
3375 if (!input0.IsValid() || !input1.IsValid())
3376 {
3377 return Fail("%s: Operation has invalid inputs", __func__);
3378 }
3379
3380 // The FuseActivation parameter is always the input index 2
3381 // and it should be optional
3382 ActivationFn activationFunction;
3383 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3384 {
3385 return Fail("%s: Operation has invalid inputs", __func__);
3386 }
3387
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003388 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003389 if (!output)
3390 {
3391 return Fail("%s: Could not read output 0", __func__);
3392 }
3393
3394 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3395 if (IsDynamicTensor(outputInfo))
3396 {
3397 return Fail("%s: Dynamic output tensors are not supported", __func__);
3398 }
3399
3400 bool isSupported = false;
3401 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3402 IsSubtractionSupported,
3403 data.m_Backends,
3404 isSupported,
3405 input0.GetTensorInfo(),
3406 input1.GetTensorInfo(),
3407 outputInfo);
3408 if (!isSupported)
3409 {
3410 return false;
3411 }
3412
3413 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3414 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3415
3416 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3417 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3418
3419 if (endLayer)
3420 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003421 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003422 if (!isReshapeSupported)
3423 {
3424 return false;
3425 }
Mike Kelly0a879362019-07-29 16:56:31 +01003426 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3427 }
3428
3429 return Fail("%s: ProcessActivation failed", __func__);
3430}
3431
Finn Williams23b87b32019-07-30 11:44:05 +01003432template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003433 typename HalOperation = typename HalPolicy::Operation,
3434 typename HalModel = typename HalPolicy::Model>
3435bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003436{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003437 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003438
3439 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3440 if (!input.IsValid())
3441 {
3442 return Fail("%s: Operation has invalid inputs", __func__);
3443 }
3444
3445 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3446 unsigned int rank = inputInfo.GetNumDimensions();
3447 if (rank > 4)
3448 {
3449 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3450 }
3451
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003452 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003453 if (!output)
3454 {
3455 return Fail("%s: Could not read output 0", __func__);
3456 }
3457
3458 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3459 {
3460 return Fail("%s: Dynamic output tensors are not supported", __func__);
3461 }
3462
3463 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3464 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003465 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003466
3467 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3468
3469 std::vector<int32_t> axis;
3470 if (!axisOperand)
3471 {
3472 axis.assign(dimensionSequence,
3473 dimensionSequence + rank);
3474 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003475 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003476 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003477 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003478 }
3479
3480 std::vector<uint32_t> outputDims;
3481 for (unsigned int i = 0; i < rank; i++)
3482 {
3483 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3484 auto currentDimension = inputInfo.GetShape()[i];
3485 if (skipSqueeze || currentDimension != 1)
3486 {
3487 outputDims.push_back(currentDimension);
3488 }
3489 }
3490
3491 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3492
3493 armnn::TensorInfo outputInfo = inputInfo;
3494 outputInfo.SetShape(outShape);
3495
3496 armnn::ReshapeDescriptor reshapeDesc;
3497 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3498
3499 bool isSupported = false;
3500 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3501 IsReshapeSupported,
3502 data.m_Backends,
3503 isSupported,
3504 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003505 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003506 reshapeDesc);
3507 if (!isSupported)
3508 {
3509 return false;
3510 }
3511
3512 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3513 assert(layer != nullptr);
3514 input.Connect(layer->GetInputSlot(0));
3515
3516 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3517}
3518
3519template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003520 typename HalOperation = typename HalPolicy::Operation,
3521 typename HalModel = typename HalPolicy::Model>
3522bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003523{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003524 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003525
3526 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3527 if (!input.IsValid())
3528 {
3529 return Fail("%s: Operation has invalid inputs", __func__);
3530 }
3531
3532 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3533 unsigned int rank = inputInfo.GetNumDimensions();
3534 if (rank > 4)
3535 {
3536 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3537 }
3538
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003539 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003540 if (!output)
3541 {
3542 return Fail("%s: Could not read output 0", __func__);
3543 }
3544
3545 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3546 if (IsDynamicTensor(outputInfo))
3547 {
3548 return Fail("%s: Dynamic output tensors are not supported", __func__);
3549 }
3550
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003551 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3552 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3553 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003554
3555 std::vector<int32_t> beginValues;
3556 std::vector<int32_t> endValues;
3557 std::vector<int32_t> stridesValues;
3558
3559 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003560 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003561 {
3562 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3563 {
3564 return false;
3565 }
3566
3567 if (operandValues.size() != rank)
3568 {
3569 return false;
3570 }
3571
3572 return true;
3573 };
3574
3575 if (!ValidateInputOperands(*beginOperand, beginValues)
3576 || !ValidateInputOperands(*endOperand, endValues)
3577 || !ValidateInputOperands(*stridesOperand, stridesValues))
3578 {
3579 return Fail("%s: Operation has invalid input operand", __func__);
3580 }
3581
3582 // Stride cannot have value '0'
3583 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3584 {
3585 return Fail("%s: Stride must be non-zero value.", __func__);
3586 }
3587
3588 armnn::StridedSliceDescriptor descriptor;
3589 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3590 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3591 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3592 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3593
3594 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3595 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3596 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3597 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3598 {
3599 return Fail("%s: Operation has invalid inputs", __func__);
3600 }
3601
3602 bool isSupported = false;
3603 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3604 IsStridedSliceSupported,
3605 data.m_Backends,
3606 isSupported,
3607 inputInfo,
3608 outputInfo,
3609 descriptor);
3610 if (!isSupported)
3611 {
3612 return false;
3613 }
3614
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003615 // Check if slice can fit in a inferred output
3616 armnn::TensorShape inputShape = inputInfo.GetShape();
3617 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3618 {
3619 int stride = descriptor.m_Stride[i];
3620 int start = descriptor.GetStartForAxis(inputShape, i);
3621 int stop = descriptor.GetStopForAxis(inputShape, i, start);
3622
3623 if (descriptor.m_ShrinkAxisMask & (1 << i))
3624 {
3625 // If the difference between the start point and the end point of the slice on an axis being shrunk
3626 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3627 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3628 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3629 {
3630 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3631 }
3632 }
3633 }
3634
Mike Kelly46272802019-08-14 17:00:48 +01003635 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3636 assert(layer != nullptr);
3637 input.Connect(layer->GetInputSlot(0));
3638
3639 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3640}
3641
3642template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003643 typename HalOperation = typename HalPolicy::Operation,
3644 typename HalModel = typename HalPolicy::Model>
3645bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003646{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003647 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003648
3649 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3650 if (!input.IsValid())
3651 {
3652 return Fail("%s: Operation has invalid inputs", __func__);
3653 }
3654
3655 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3656 unsigned int rank = inputInfo.GetNumDimensions();
3657 if (rank > 4)
3658 {
3659 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3660 }
3661
3662 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3663 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003664 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003665
3666 std::vector<int32_t> perm(rank);
3667 if (!permOperand)
3668 {
3669 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3670 for (unsigned int i = rank; i > 0; i--)
3671 {
3672 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3673 }
3674 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003675 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003676 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003677 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003678 }
3679
3680 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3681
Mike Kelly4a956582020-02-28 10:32:09 +00003682 armnn::TransposeDescriptor transposeDesc;
3683 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003684
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003685 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003686 if (!output)
3687 {
3688 return Fail("%s: Could not read output 0", __func__);
3689 }
3690
3691 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003692 if (IsDynamicTensor(outputInfo))
3693 {
3694 return Fail("%s: Dynamic output tensors are not supported", __func__);
3695 }
3696
Mike Kelly46272802019-08-14 17:00:48 +01003697
3698 bool isSupported = false;
3699 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003700 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003701 data.m_Backends,
3702 isSupported,
3703 inputInfo,
3704 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003705 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003706 if (!isSupported)
3707 {
3708 return false;
3709 }
3710
Mike Kelly4a956582020-02-28 10:32:09 +00003711 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003712 assert(layer != nullptr);
3713 input.Connect(layer->GetInputSlot(0));
3714
3715 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3716}
3717
3718template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003719 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003720 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003721 typename HalModel = typename HalPolicy::Model>
3722bool ConvertBatchToSpaceNd(const HalOperation& operation,
3723 const HalModel& model,
3724 ConversionData& data)
3725{
Finn Williams23b87b32019-07-30 11:44:05 +01003726
3727 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3728 if (!input.IsValid())
3729 {
3730 return Fail("%s: Operation has invalid inputs", __func__);
3731 }
3732
3733 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3734 if (!output)
3735 {
3736 return Fail("%s: Could not read output 0", __func__);
3737 }
3738
3739 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3740 if (IsDynamicTensor(outputInfo))
3741 {
3742 return Fail("%s: Dynamic output tensors are not supported", __func__);
3743 }
3744
3745 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3746 if (!blockOperand)
3747 {
3748 return Fail("%s: Could not read input 1", __func__);
3749 }
3750
3751 // Convert the block operand to int32
3752 std::vector<int32_t> block;
3753 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3754 {
3755 return Fail("%s: Input 1 has invalid values", __func__);
3756 }
3757
3758 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3759
3760 unsigned int rank = inputInfo.GetNumDimensions();
3761 if (rank != 4)
3762 {
3763 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3764 }
3765
3766 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3767 {
3768 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3769 " greater than or equal to 1", __func__);
3770 }
3771
3772 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3773 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3774 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3775
Kevin May42477c12020-03-26 13:34:14 +00003776 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01003777 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003778 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003779 }
3780 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3781 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3782
3783 bool isSupported = false;
3784 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3785 IsBatchToSpaceNdSupported,
3786 data.m_Backends,
3787 isSupported,
3788 inputInfo,
3789 outputInfo,
3790 batchToSpaceNdDesc);
3791 if (!isSupported)
3792 {
3793 return false;
3794 }
3795
3796 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3797 assert(layer != nullptr);
3798 input.Connect(layer->GetInputSlot(0));
3799
3800 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3801}
Mike Kelly0a879362019-07-29 16:56:31 +01003802
Finn Williamsd74c5052019-07-30 17:06:00 +01003803template<typename HalPolicy,
3804 typename HalOperation = typename HalPolicy::Operation,
3805 typename HalOperand = typename HalPolicy::Operand,
3806 typename HalModel = typename HalPolicy::Model>
3807bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3808{
3809 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3810 if (!input.IsValid())
3811 {
3812 return Fail("%s: Operation has invalid inputs", __func__);
3813 }
3814
3815 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3816 unsigned int rank = inputInfo.GetNumDimensions();
3817 unsigned int spatialDim = rank - 2;
3818
3819 if (rank != 4)
3820 {
3821 Fail("%s: Only inputs with rank 4 are supported", __func__);
3822 }
3823
3824 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3825 if (!output)
3826 {
3827 return Fail("%s: Could not read output 0", __func__);
3828 }
3829
3830 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3831 if (IsDynamicTensor(outputInfo))
3832 {
3833 return Fail("%s: Dynamic output tensors are not supported", __func__);
3834 }
3835
3836 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3837 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3838
3839 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3840 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3841 {
3842 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3843 }
3844
3845 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003846 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3847 {
3848 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3849 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003850 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3851 {
3852 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3853 }
3854
3855 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3856 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3857 {
3858 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3859 }
3860
3861 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3862 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003863 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3864 {
3865 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3866 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003867 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3868 {
3869 int paddingBeforeInput = paddings[i];
3870 int paddingAfterInput = paddings[i + 1];
3871 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3872 {
3873 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3874 }
3875
3876 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3877 }
3878
3879 armnn::SpaceToBatchNdDescriptor descriptor;
3880 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3881 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3882 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3883
Kevin May42477c12020-03-26 13:34:14 +00003884 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01003885 {
3886 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3887 }
3888
3889 bool isSupported = false;
3890 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3891 IsSpaceToBatchNdSupported,
3892 data.m_Backends,
3893 isSupported,
3894 inputInfo,
3895 outputInfo,
3896 descriptor);
3897 if (!isSupported)
3898 {
3899 return false;
3900 }
3901
3902 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3903 assert(layer != nullptr);
3904 input.Connect(layer->GetInputSlot(0));
3905
3906 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3907}
3908
saoste01b8471482018-10-10 09:44:51 +01003909} // namespace armnn_driver