blob: 153813385e6ff02ea34662e6420caeb0a4aa49ae [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
// Aggregates the state carried through the conversion of one AndroidNN model
// into an ArmNN network.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr) // start with an empty INetworkPtr; filled in during conversion
    {}

    // Backends queried (in order) when checking whether a layer is supported.
    const std::vector<armnn::BackendId>       m_Backends;
    // The ArmNN network being built.
    armnn::INetworkPtr                        m_Network;
    // Output slot created for each model operand — presumably indexed by operand
    // index (TODO confirm against the code that populates it).
    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
    // Memory pools used to resolve CONSTANT_REFERENCE operand data
    // (see GetOperandValueReadOnlyAddress).
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
// Wraps an armnn::IOutputSlot (plus the TensorInfo it produces) that feeds an
// operation's input, with a validity flag so conversion code can signal
// "no usable input" without using null handles directly.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // Whether this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Info of the tensor produced by the wrapped output slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};
72
// Holds an armnn::ConstTensor created from a model operand, optionally owning a
// swizzled copy of the tensor data when a permutation had to be applied.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the tensor data (see m_SwizzledTensorData).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is disabled (the pin may own its data buffer); moving transfers ownership.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
107
108} // namespace armnn_driver
109
110///
111/// Utility functions
112///
113
114namespace
115{
116
117using namespace armnn_driver;
118using namespace android::nn;
119
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Conversion failures are expected/recoverable, hence debug-level logging.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
128
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Tries each backend in 'backends' in order and sets 'supported' to the result of the
// backend's support query; iteration stops at the first backend that accepts the layer.
// Unregistered backends and per-backend rejections are logged at debug level but are not
// errors by themselves — only 'supported' remaining false indicates overall failure.
// An armnn::InvalidArgumentException thrown by the query is rethrown with extra context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100172
// Builds an armnn::TensorShape from the dimension list of an AndroidNN operand.
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
178
Matthew Bentham912b3622019-05-03 15:49:14 +0100179inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100180{
Matthew Bentham912b3622019-05-03 15:49:14 +0100181 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
182 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
183 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100184}
185
Kevin May42477c12020-03-26 13:34:14 +0000186#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kellyb5fdf382019-06-11 16:35:25 +0100187
Keith Davis71006492020-01-06 17:44:16 +0000188// Support within the 1.2 driver for specific tensor data types
Mike Kellyb5fdf382019-06-11 16:35:25 +0100189inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
190{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000191 return type == V1_2::OperandType::BOOL ||
Sadik Armagan793a70c2020-03-19 13:54:04 +0000192 type == V1_2::OperandType::TENSOR_BOOL8 ||
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000193 type == V1_2::OperandType::TENSOR_FLOAT16 ||
194 type == V1_2::OperandType::TENSOR_FLOAT32 ||
195 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
Keith Davis71006492020-01-06 17:44:16 +0000196 type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000197 type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
198 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
Mike Kellyb5fdf382019-06-11 16:35:25 +0100199 type == V1_2::OperandType::TENSOR_INT32;
200}
201
202#endif
203
Kevin May42477c12020-03-26 13:34:14 +0000204#ifdef ARMNN_ANDROID_NN_V1_3
205
206// Support within the 1.3 driver for specific tensor data types
207inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
208{
209 return type == V1_3::OperandType::BOOL ||
Sadik Armagan51ba2c62020-03-31 15:36:25 +0100210 type == V1_3::OperandType::TENSOR_BOOL8 ||
Kevin May42477c12020-03-26 13:34:14 +0000211 type == V1_3::OperandType::TENSOR_FLOAT16 ||
212 type == V1_3::OperandType::TENSOR_FLOAT32 ||
213 type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
Sadik Armagan51ba2c62020-03-31 15:36:25 +0100214 type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
Kevin May42477c12020-03-26 13:34:14 +0000215 type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
216 type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
217 type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
218 type == V1_3::OperandType::TENSOR_INT32;
219}
220
221#endif
222
// A HAL 1.0 operand can never be BOOL (the type does not exist before HAL 1.2).
inline bool IsBool(V1_0::Operand)
{
    return false;
}
227
// A HAL 1.0 operand never comes from HAL version 1.2 or later.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
232
Kevin May42477c12020-03-26 13:34:14 +0000233#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kellyb5fdf382019-06-11 16:35:25 +0100234
// Returns whether a HAL 1.2 operand holds a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}
239
/// Checks if an operand comes from HAL version 1.2 or later (a 1.2 operand always does).
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}
245
246#endif
247
248#ifdef ARMNN_ANDROID_NN_V1_3
249
// Returns whether a HAL 1.3 operand holds a BOOL value.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}
254
/// Checks if an operand comes from HAL version 1.2 or later (a 1.3 operand always does).
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}
260
Mike Kellyb5fdf382019-06-11 16:35:25 +0100261#endif
262
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100263template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000264armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
265 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100266 armnn::TensorInfo reshapeInfo)
267{
268 armnn::ReshapeDescriptor reshapeDescriptor;
269 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
270
271 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100272 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273
274 // Attach the input layer to the reshape layer
275 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
276 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
277
278 return *reshapeLayer;
279}
280
// Connects two input handles to a binary layer, inserting a Reshape on the
// lower-rank input (prepending degenerate 1-dimensions) when the ranks differ,
// so the two inputs can be broadcast against each other.
// @param input0     First input; always ends up on the layer's input slot 0.
// @param input1     Second input; always ends up on the layer's input slot 1.
// @param startLayer The layer to connect to; must not be null.
// @param data       Conversion state (backends to query, network being built).
// @return false if the required Reshape is unsupported by every backend, true otherwise.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // New shape: leading dimensions are 1, trailing dimensions copy the small tensor's shape.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
373
// Computes the head/tail padding for one spatial dimension from the AndroidNN
// padding scheme, converting the int32 results to uint32 (numeric_cast throws
// if a value were negative).
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
387
Kevin May42477c12020-03-26 13:34:14 +0000388#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kelly86b36d42019-07-12 16:39:33 +0100389
// Overload of CalcPadding taking a dilation factor (available for HAL 1.2+ builds).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
399
// Computes the explicit padding for a transpose convolution from the desired
// output size. Outputs are int32 (unlike CalcPadding's uint32) — presumably
// because transpose-conv padding can be negative; confirm against
// calculateExplicitPaddingTransposeConv.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}
405
Mike Kelly86b36d42019-07-12 16:39:33 +0100406#endif
407
// Builds an android::nn::Shape (type, dimensions and quantization params) from a 1.0 operand.
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
417
Kevin May42477c12020-03-26 13:34:14 +0000418#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Mike Kelly46272802019-08-14 17:00:48 +0100419
// Builds an android::nn::Shape (type, dimensions and quantization params) from a 1.2 operand.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
429
430#endif
431
Kevin May42477c12020-03-26 13:34:14 +0000432#ifdef ARMNN_ANDROID_NN_V1_3
433
// Builds an android::nn::Shape (type, dimensions and quantization params) from a 1.3 operand.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
443
444#endif
445
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        // Derive one bias scale per weight-quantization axis entry.
        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Silently fix the scale only when it is within 1% of the expected value;
            // larger mismatches are left untouched for later validation to reject.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
485
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4-D tensor (this permutation is its own inverse).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (RotateTensorLeft and RotateTensorRight are mutual inverses)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100493
494template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000495armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
496 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100497{
498 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000499 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100500
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100501 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100502
503 // Connect input to swizzle layer
504 input.Connect(layer->GetInputSlot(0));
505
506 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000507 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100508 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
509
510 return *layer;
511}
512
arovir01b0717b52018-09-05 17:03:25 +0100513bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
514 const armnn::TensorShape & outputShape,
515 uint32_t concatDim)
516{
517 // Validate the output shape is correct given the input shapes (which have just been validated)
518 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
519 if (outputShape.GetNumDimensions() != numDimensions)
520 {
521 return Fail("%s: Output shape has wrong number of dimensions", __func__);
522 }
523
524 unsigned int outputSizeAlongConcatenatedDimension = 0;
525 for (unsigned int i = 0; i < inputShapes.size(); i++)
526 {
527 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
528 }
529
530 for (unsigned int i = 0; i < numDimensions; ++i)
531 {
532 if (i == concatDim)
533 {
534 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
535 {
536 return Fail(
537 "%s: Invalid output shape for dimension %d (%d != %d)",
538 __func__,
539 i,
540 outputShape[i],
541 outputSizeAlongConcatenatedDimension);
542 }
543 }
544 else
545 {
546 if (outputShape[i] != inputShapes[0][i])
547 {
548 return Fail("%s: Invalid output shape", __func__);
549 }
550 }
551 }
552
553 return true;
554}
555
// Returns whether the tensor has fewer than 3 dimensions; callers reshape such
// tensors before concatenation handling, which assumes at least 3-D inputs.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
560
// Inserts a Transpose layer in front of every input handle (unless 'mapping' is
// the 4-D identity) and replaces, in place, each entry of 'inputs'/'inputShapes'
// with the swizzled equivalent.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
581
// Checks that a Transpose with 'mapping' is supported by a backend for every
// input, and only then swizzles all inputs via SwizzleInputs. A 4-D identity
// mapping is a no-op and always succeeds.
// @return false if any input's transpose is unsupported by all backends.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        // All inputs passed the support check — mutate them in place.
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
616
617
// Decides whether concatenation inputs must be permuted so that the concat can
// run along a dimension ArmNN subtensors support. May rewrite 'concatDimension'
// and fills 'permutationPair' with (forward, inverse) permutations to apply
// before and after the concatenation.
// @return true if a permute is required, false otherwise.
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        // SwapDim1And2 is its own inverse, so it serves as both halves of the pair.
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
642
643} // anonymous namespace
644
645namespace armnn_driver
646{
647
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
658
659} // namespace armnn_driver
660
661///
662/// Utility templates
663///
664
665namespace armnn_driver
666{
667
668using namespace android::nn;
669
// Returns a pointer to the operation's inputIndex-th input operand, or nullptr
// if the index is out of range.
// @param failOnIndexOutOfBounds When true, an out-of-range index is logged as a failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match uint32_t/size_t; the previous "%i" specifier was mismatched
            // for the size_t argument (undefined behavior on LP64 targets).
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
692
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100693template<typename HalPolicy,
694 typename HalOperand = typename HalPolicy::Operand,
695 typename HalOperation = typename HalPolicy::Operation,
696 typename HalModel = typename HalPolicy::Model>
697const HalOperand* GetOutputOperand(const HalOperation& operation,
698 uint32_t outputIndex,
699 const HalModel& model)
arovir01b0717b52018-09-05 17:03:25 +0100700{
701 if (outputIndex >= operation.outputs.size())
702 {
703 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
704 return nullptr;
705 }
706
707 // Model should have been validated beforehand
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100708 ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
arovir01b0717b52018-09-05 17:03:25 +0100709
Kevin May42477c12020-03-26 13:34:14 +0000710 return &getMainModel(model).operands[operation.outputs[outputIndex]];
arovir01b0717b52018-09-05 17:03:25 +0100711}
712
// Returns a read-only pointer to a constant operand's raw data, or nullptr.
// @param operand  The operand whose value is wanted.
// @param model    Owns the operandValues blob used for CONSTANT_COPY operands.
// @param data     Conversion state holding the memory pools for CONSTANT_REFERENCE operands.
// @param optional When true, a NO_VALUE lifetime returns nullptr silently instead of logging a failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A missing non-optional value falls through to the failure path below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
759
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100760template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100761 typename HalOperation = typename HalPolicy::Operation,
762 typename HalModel = typename HalPolicy::Model,
763 typename HalOperandType = typename HalPolicy::OperandType>
764bool GetOperandType(const HalOperation& operation,
765 uint32_t inputIndex,
766 const HalModel& model,
767 HalOperandType& type)
768{
769 using HalOperand = typename HalPolicy::Operand;
770
771 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
772 if (!operand)
773 {
774 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
775 }
776
777 type = operand->type;
778 return true;
779}
780
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    // NOTE: NO_VALUE deliberately counts as "constant" so that omitted
    // optional operands are routed down the constant-tensor conversion path.
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
793
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
// Wraps a constant HAL operand as a ConstTensorPin holding its TensorInfo and
// a read-only pointer to its data.
// @param dimensionMappings  permutation applied to the tensor (and, for
//                           per-axis quantized tensors, to the quantization
//                           dimension) — e.g. to convert a depthwise kernel
//                           from Android's layout to ArmNN's.
// @param overrideTensorShape  if non-null, replaces the operand's shape.
// @param optional  when true, an operand with no value yields a pin that is
//                  invalid but flagged as optional rather than a failure.
// @return a valid ConstTensorPin on success; an invalid pin on failure.
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Mandatory operands must be constant; optional ones may be absent.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    // Remap the quantization dimension through the same permutation as the data.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    // Shape override is applied after the quantization-dim remap above.
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
842
843template<typename HalPolicy,
844 typename HalOperation = typename HalPolicy::Operation,
845 typename HalModel = typename HalPolicy::Model>
846ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
847 uint32_t inputIndex,
848 const HalModel& model,
849 const ConversionData& data,
850 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
851 const armnn::TensorShape* overrideTensorShape = nullptr,
852 bool optional = false)
853{
854 using HalOperand = typename HalPolicy::Operand;
855
856 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
857 if (!operand)
858 {
859 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
860 return ConstTensorPin();
861 }
862 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
863 model,
864 data,
865 dimensionMappings,
866 overrideTensorShape,
867 optional);
868}
869
870template<typename HalPolicy,
871 typename OutputType,
872 typename HalOperandType = typename HalPolicy::OperandType,
873 typename HalOperation = typename HalPolicy::Operation,
874 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100875bool GetInputScalar(const HalOperation& operation,
876 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100877 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100878 OutputType& outValue,
879 const HalModel& model,
880 const ConversionData& data)
881{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100882 using HalOperand = typename HalPolicy::Operand;
883
884 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100885 if (!operand)
886 {
887 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
888 }
889
890 if (operand->type != type)
891 {
892 return Fail("%s: unexpected operand type: %s (should be %s)",
893 __func__, toString(operand->type).c_str(), toString(type).c_str());
894 }
895
896 if (operand->location.length != sizeof(OutputType))
897 {
898 return Fail("%s: incorrect operand location length: %i (should be %i)",
899 __func__, operand->location.length, sizeof(OutputType));
900 }
901
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100902 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100903 if (!valueAddress)
904 {
905 return Fail("%s: failed to get address for operand", __func__);
906 }
907
908 outValue = *(static_cast<const OutputType*>(valueAddress));
909 return true;
910}
911
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100912template<typename HalPolicy,
913 typename HalOperation = typename HalPolicy::Operation,
914 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100915bool GetInputInt32(const HalOperation& operation,
916 uint32_t inputIndex,
917 int32_t& outValue,
918 const HalModel& model,
919 const ConversionData& data)
920{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100921 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100922}
923
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100924template<typename HalPolicy,
925 typename HalOperation = typename HalPolicy::Operation,
926 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100927bool GetInputFloat32(const HalOperation& operation,
928 uint32_t inputIndex,
929 float& outValue,
930 const HalModel& model,
931 const ConversionData& data)
932{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100934}
935
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100936template<typename HalPolicy,
937 typename HalOperation = typename HalPolicy::Operation,
938 typename HalOperandType = typename HalPolicy::OperandType,
939 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100940bool GetInputActivationFunctionImpl(const HalOperation& operation,
941 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100942 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100943 ActivationFn& outActivationFunction,
944 const HalModel& model,
945 const ConversionData& data)
946{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100947 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100948 {
949 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
950 __func__,
951 toString(type).c_str(),
952 toString(OperandType::INT32).c_str(),
953 toString(OperandType::TENSOR_INT32).c_str());
954 }
955
956 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100957 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100958 {
959 return Fail("%s: failed to get activation input value", __func__);
960 }
961 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
962 return true;
963}
964
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100965template<typename HalPolicy,
966 typename HalOperation = typename HalPolicy::Operation,
967 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100968bool GetInputActivationFunction(const HalOperation& operation,
969 uint32_t inputIndex,
970 ActivationFn& outActivationFunction,
971 const HalModel& model,
972 const ConversionData& data)
973{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100974 return GetInputActivationFunctionImpl<HalPolicy>(operation,
975 inputIndex,
976 HalPolicy::OperandType::INT32,
977 outActivationFunction,
978 model,
979 data);
arovir01b0717b52018-09-05 17:03:25 +0100980}
981
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Reads an activation selector supplied as a tensor operand.
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the comment above, the type passed down is the
    // scalar INT32 tag rather than TENSOR_INT32 — confirm against callers
    // whether this is intentional.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
999
1000
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001001template<typename HalPolicy,
1002 typename HalOperation = typename HalPolicy::Operation,
1003 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001004bool GetOptionalInputActivation(const HalOperation& operation,
1005 uint32_t inputIndex,
1006 ActivationFn& activationFunction,
1007 const HalModel& model,
1008 const ConversionData& data)
1009{
1010 if (operation.inputs.size() <= inputIndex)
1011 {
1012 activationFunction = ActivationFn::kActivationNone;
1013 }
1014 else
1015 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001016 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001017 {
1018 return Fail("%s: Operation has invalid inputs", __func__);
1019 }
1020 }
1021 return true;
1022}
1023
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001024template<typename HalPolicy,
1025 typename ConvolutionDescriptor,
1026 typename HalOperation = typename HalPolicy::Operation,
1027 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001028bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1029 uint32_t dilationXIndex,
1030 ConvolutionDescriptor& descriptor,
1031 const HalModel& model,
1032 const ConversionData& data)
1033{
1034 bool success = true;
1035 if (operation.inputs.size() >= dilationXIndex + 2)
1036 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001037 success &= GetInputScalar<HalPolicy>(operation,
1038 dilationXIndex,
1039 HalPolicy::OperandType::INT32,
1040 descriptor.m_DilationX,
1041 model,
1042 data);
1043 success &= GetInputScalar<HalPolicy>(operation,
1044 dilationXIndex + 1,
1045 HalPolicy::OperandType::INT32,
1046 descriptor.m_DilationY,
1047 model,
1048 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001049 }
1050
1051 return success;
1052}
1053
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001054template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001055 typename HalOperation = typename HalPolicy::Operation,
1056 typename HalModel = typename HalPolicy::Model>
1057bool GetOptionalBool(const HalOperation& operation,
1058 uint32_t inputIndex,
1059 const HalModel& model,
1060 const ConversionData& data)
1061{
1062 using HalOperand = typename HalPolicy::Operand;
1063
1064 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1065 if (!operand)
1066 {
1067 return false;
1068 }
1069
1070 if (!IsBool(*operand))
1071 {
1072 return false;
1073 }
1074
1075 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1076 if (!valueAddress)
1077 {
1078 return false;
1079 }
1080
1081 if (*(static_cast<const bool*>(valueAddress)))
1082 {
1083 return true;
1084 }
1085 else
1086 {
1087 return false;
1088 }
1089}
1090
1091template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001092 typename HalOperand = typename HalPolicy::Operand,
1093 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001094bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001095 std::vector<int32_t>& outValues,
1096 const HalModel& model,
1097 const ConversionData& data)
1098{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001099 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001100 {
1101 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1102 }
1103
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001104 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001105 if (!startAddress)
1106 {
1107 return Fail("%s: failed to get operand address", __func__, operand.type);
1108 }
1109
1110 // Check number of bytes is sensible
1111 const uint32_t numBytes = operand.location.length;
1112 if (numBytes % sizeof(int32_t) != 0)
1113 {
1114 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1115 __func__, numBytes, sizeof(int32_t));
1116 }
1117
1118 outValues.resize(numBytes / sizeof(int32_t));
1119 memcpy(outValues.data(), startAddress, numBytes);
1120 return true;
1121}
1122
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001123template<typename HalPolicy,
1124 typename HalOperation = typename HalPolicy::Operation,
1125 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001126bool GetInputPaddingScheme(const HalOperation& operation,
1127 uint32_t inputIndex,
1128 PaddingScheme& outPaddingScheme,
1129 const HalModel& model,
1130 const ConversionData& data)
1131{
1132 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001133 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001134 {
1135 return Fail("%s: failed to get padding scheme input value", __func__);
1136 }
1137
1138 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1139 return true;
1140}
1141
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Resolves the operation's inputIndex-th input operand to a LayerInputHandle:
// model inputs / temporaries map to an existing output slot tracked in
// data.m_OutputSlotForOperand; constants become a new ArmNN Constant layer.
// Returns an invalid LayerInputHandle (and logs via Fail) on any error.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; see catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The chosen backends must also accept a Constant layer of
                    // this tensor's info, or the conversion fails.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1254
Kevin May42477c12020-03-26 13:34:14 +00001255
#ifdef ARMNN_ANDROID_NN_V1_3
template<typename HalPolicy>
// V1_3 overload of ConvertToLayerInputHandle: identical logic to the generic
// template, but matches the NN HAL 1.3 lifetimes (SUBGRAPH_INPUT /
// SUBGRAPH_OUTPUT replace MODEL_INPUT / MODEL_OUTPUT).
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; see catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The chosen backends must also accept a Constant layer of
                    // this tensor's info, or the conversion fails.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1368
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001369template<typename HalPolicy,
1370 typename HalOperation = typename HalPolicy::Operation,
1371 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001372bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1373 uint32_t operationOutputIndex,
1374 armnn::IConnectableLayer& layer,
1375 uint32_t layerOutputIndex,
1376 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001377 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001378{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001379 using HalOperand = typename HalPolicy::Operand;
1380
1381 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001382 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1383 {
1384 return false;
1385 }
1386
1387 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1388
1389 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1390 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1391
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001392 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001393
1394 return true;
1395}
1396
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001397template<typename HalPolicy,
1398 typename HalOperation = typename HalPolicy::Operation,
1399 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001400armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1401 uint32_t inputIndex,
1402 const HalModel& model,
1403 ConversionData& data)
1404{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001405 using HalOperand = typename HalPolicy::Operand;
1406
1407 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001408 if (!operand)
1409 {
1410 return armnn::DataLayout::NHWC;
1411 }
1412
1413 if (!IsBool(*operand))
1414 {
1415 return armnn::DataLayout::NHWC;
1416 }
1417
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001418 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001419 if (!valueAddress)
1420 {
1421 return armnn::DataLayout::NHWC;
1422 }
1423
1424 if (*(static_cast<const bool*>(valueAddress)))
1425 {
1426 return armnn::DataLayout::NCHW;
1427 }
1428 else
1429 {
1430 return armnn::DataLayout::NHWC;
1431 }
1432}
1433
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001434template<typename HalPolicy,
1435 typename HalOperation = typename HalPolicy::Operation,
1436 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001437bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1438 uint32_t outputIndex,
1439 armnn::IConnectableLayer& layer,
1440 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001441 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001442{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001443 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1444 outputIndex,
1445 layer,
1446 outputIndex,
1447 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001448 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001449}
1450
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Shared implementation for the activation operations (RELU/RELU1/RELU6/TANH):
// validates input 0 and output 0, checks backend support for the given
// descriptor, then adds an Activation layer and tracks its output slot.
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask each configured backend whether it can run this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1499
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001500template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001501 typename HalOperation = typename HalPolicy::Operation,
1502 typename HalModel = typename HalPolicy::Model>
1503bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1504{
1505 armnn::ActivationDescriptor desc;
1506 desc.m_Function = armnn::ActivationFunction::ReLu;
1507
1508 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1509}
1510
1511template<typename HalPolicy,
1512 typename HalOperation = typename HalPolicy::Operation,
1513 typename HalModel = typename HalPolicy::Model>
1514bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1515{
1516 armnn::ActivationDescriptor desc;
1517 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1518 desc.m_A = 1.0f;
1519 desc.m_B = -1.0f;
1520
1521 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1522}
1523
1524template<typename HalPolicy,
1525 typename HalOperation = typename HalPolicy::Operation,
1526 typename HalModel = typename HalPolicy::Model>
1527bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1528{
1529 armnn::ActivationDescriptor desc;
1530 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1531 desc.m_A = 6.0f;
1532
1533 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1534}
1535
1536template<typename HalPolicy,
1537 typename HalOperation = typename HalPolicy::Operation,
1538 typename HalModel = typename HalPolicy::Model>
1539bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1540{
1541 armnn::ActivationDescriptor desc;
1542 desc.m_Function = armnn::ActivationFunction::TanH;
1543 desc.m_A = 1.0f; // android nn does not support tanH parameters
1544 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1545
1546 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1547}
1548
1549template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001550 typename HalOperation = typename HalPolicy::Operation,
1551 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001552bool ConvertPaddings(const HalOperation& operation,
1553 const HalModel& model,
1554 ConversionData& data,
1555 unsigned int rank,
1556 armnn::PadDescriptor& padDescriptor)
1557{
1558 using HalOperand = typename HalPolicy::Operand;
1559
1560 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1561 if (!paddingsOperand)
1562 {
1563 return Fail("%s: Could not read paddings operand", __func__);
1564 }
1565
1566 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1567 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1568 {
1569 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1570 }
1571
1572 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001573 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1574 {
1575 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1576 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001577
1578 // add padding for each dimension of input tensor.
1579 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1580 {
1581 int paddingBeforeInput = paddings[i];
1582 int paddingAfterInput = paddings[i + 1];
1583
1584 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1585 {
1586 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1587 }
1588
1589 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1590 }
1591
1592 return true;
1593}
1594
// Common conversion routine for the 2-D pooling operations (the pooling
// algorithm — max / average / L2 — is selected by the caller via poolType).
// Translates the HAL operation into an armnn Pooling2d layer followed by an
// optional fused activation, after checking backend support.
// Handles both HAL signatures:
//   - explicit padding: >= 10 inputs (pad l/r/t/b, strides, window, activation)
//   - implicit padding: 6 parameter inputs (padding scheme, strides, window,
//     activation), with padding derived via CalcPadding
// Returns false (via Fail) on any invalid or unsupported input.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The output shape must be fully known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // NHWC is the HAL default; may be overridden below by an optional
    // data-layout operand on 1.2-or-later operands.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // Explicit-padding signature:
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ operands may carry an optional trailing data-layout input.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // Implicit-padding signature:
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ operands may carry an optional trailing data-layout input.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Resolve the padding scheme into explicit pad values using the input's
        // spatial dimensions (looked up through the chosen data layout).
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer pass-through when none).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1713
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001714template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001715 typename HalOperation = typename HalPolicy::Operation,
1716 typename HalModel = typename HalPolicy::Model>
1717bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001718{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001719 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001720
1721 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1722 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1723
1724 if (!input0.IsValid() || !input1.IsValid())
1725 {
1726 return Fail("%s: Operation has invalid inputs", __func__);
1727 }
1728
1729 // The FuseActivation parameter is always the input index 2
1730 // and it should be optional
1731 ActivationFn activationFunction;
1732 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1733 {
1734 return Fail("%s: Operation has invalid inputs", __func__);
1735 }
1736
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001737 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001738 if (!outputOperand)
1739 {
1740 return false;
1741 }
1742
1743 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1744 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1745
1746 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1747 if (IsDynamicTensor(outputInfo))
1748 {
1749 return Fail("%s: Dynamic output tensors are not supported", __func__);
1750 }
1751
1752 bool isSupported = false;
1753 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1754 IsAdditionSupported,
1755 data.m_Backends,
1756 isSupported,
1757 inputInfo0,
1758 inputInfo1,
1759 outputInfo);
1760 if (!isSupported)
1761 {
1762 return false;
1763 }
1764
1765 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1766 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1767
1768 if (endLayer != nullptr)
1769 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001770 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001771 if (!isReshapeSupported)
1772 {
1773 return false;
1774 }
1775
Mike Kelly46272802019-08-14 17:00:48 +01001776 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1777 }
1778 else
1779 {
1780 return Fail("%s: ProcessActivation failed", __func__);
1781 }
1782}
1783
1784template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001785 typename HalOperation = typename HalPolicy::Operation,
1786 typename HalModel = typename HalPolicy::Model>
1787bool ConvertArgMinMax(const HalOperation& operation,
1788 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001789 ConversionData& data,
1790 armnn::ArgMinMaxFunction argMinMaxFunction)
1791{
1792 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1793
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001794 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001795 using HalOperandType = typename HalPolicy::OperandType;
1796
1797 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1798
1799 if (!input0.IsValid())
1800 {
1801 return Fail("%s: Operation has invalid inputs", __func__);
1802 }
1803
1804 int32_t axis;
1805 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1806 {
1807 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1808 }
1809
1810 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1811 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1812
1813 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1814 {
1815 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1816 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1817 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1818 return Fail("%s: Axis must be in range [-n, n)", __func__);
1819 }
1820
1821 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1822 if (!output)
1823 {
1824 return Fail("%s: Could not read output 0", __func__);
1825 }
1826
1827 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1828
1829 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1830 if (IsDynamicTensor(outputInfo))
1831 {
1832 return Fail("%s: Dynamic output tensors are not supported", __func__);
1833 }
1834
1835 armnn::ArgMinMaxDescriptor descriptor;
1836 descriptor.m_Function = argMinMaxFunction;
1837 descriptor.m_Axis = axis;
1838
1839 bool isSupported = false;
1840 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1841 IsArgMinMaxSupported,
1842 data.m_Backends,
1843 isSupported,
1844 inputInfo0,
1845 outputInfo,
1846 descriptor);
1847 if (!isSupported)
1848 {
1849 return false;
1850 }
1851
1852 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1853 assert(layer != nullptr);
1854
1855 input0.Connect(layer->GetInputSlot(0));
1856
1857 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1858}
1859
1860template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001861 typename HalOperation = typename HalPolicy::Operation,
1862 typename HalModel = typename HalPolicy::Model>
1863bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001864{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001865 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001866 using HalOperandType = typename HalPolicy::OperandType;
1867
1868 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1869 if (operation.inputs.size() <= 1)
1870 {
1871 return Fail("%s: Operation has insufficient arguments", __func__);
1872 }
1873
1874 // Get inputs and outputs
1875 const std::size_t numInputTensors = operation.inputs.size() - 1;
1876
1877 int32_t concatDim;
1878 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1879 {
1880 return Fail("%s: Operation has invalid inputs", __func__);
1881 }
1882
1883 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1884 if (!outputOperand)
1885 {
1886 return Fail("%s: Operation has no outputs", __func__);
1887 }
1888
1889
1890 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
1891 armnn::TensorShape outputShape = outputInfo.GetShape();
1892
1893 //
1894 // handle negative concat dims along the lines of tensorflow as described here:
1895 // https://www.tensorflow.org/api_docs/python/tf/concat
1896 // "negative axis refers to axis + rank(values)-th dimension"
1897 //
1898 if (concatDim < 0)
1899 {
1900 concatDim += outputShape.GetNumDimensions();
1901 }
1902
1903 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
1904 {
1905 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
1906 }
1907
1908 std::vector<LayerInputHandle> inputHandles;
1909 std::vector<armnn::TensorShape> inputShapes;
1910
1911 inputHandles.reserve(numInputTensors);
1912 inputShapes.reserve(numInputTensors);
1913
1914 bool inputsHaveBeenReshaped = false;
1915 unsigned int tensorDimensionsAdded = 0;
1916
1917 for (uint32_t i = 0; i < numInputTensors; ++i)
1918 {
1919 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
1920 if (!operand)
1921 {
1922 return Fail("%s: Operation has invalid inputs", __func__);
1923 }
1924
Teresa Charlin3b959602019-10-31 17:05:47 +00001925 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
1926 if (!operandInputHandle.IsValid())
1927 {
1928 return Fail("%s: Operation has invalid inputs", __func__);
1929 }
Mike Kellyb8805202019-07-31 17:25:43 +01001930
Teresa Charlin3b959602019-10-31 17:05:47 +00001931 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01001932 if (operandShape.GetNumDimensions() == 0)
1933 {
1934 return Fail("%s: Operands with rank 0 are not supported", __func__);
1935 }
1936
1937 if (RequiresReshape(operandShape))
1938 {
1939 inputsHaveBeenReshaped = true;
1940
1941 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
1942
1943 // Expand the tensor to three dimensions
1944 if (operandShape.GetNumDimensions() == 2)
1945 {
1946 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
1947 tensorDimensionsAdded = 1;
1948 }
1949 else
1950 {
1951 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
1952 tensorDimensionsAdded = 2;
1953 }
1954
Kevin Mayaed08ac2019-12-12 16:33:31 +00001955 armnn::ReshapeDescriptor reshapeDescriptor;
1956 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
1957
1958 bool isSupported = false;
1959 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1960 IsReshapeSupported,
1961 data.m_Backends,
1962 isSupported,
1963 operandInputHandle.GetTensorInfo(),
1964 reshapeInfo,
1965 reshapeDescriptor);
1966 if (!isSupported)
1967 {
1968 return false;
1969 }
1970
Mike Kellyb8805202019-07-31 17:25:43 +01001971 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
1972 *data.m_Network,
1973 operandInputHandle,
1974 reshapeInfo
1975 );
1976
1977 // Point to the reshape operation rather then the input operation
1978 operandShape = reshapeInfo.GetShape();
1979 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
1980 }
1981
1982 inputShapes.emplace_back(operandShape);
1983 inputHandles.emplace_back(operandInputHandle);
1984
1985 if (!inputHandles.back().IsValid())
1986 {
1987 return Fail("%s: Operation has invalid inputs", __func__);
1988 }
1989 }
1990
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01001991 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
Mike Kellyb8805202019-07-31 17:25:43 +01001992
1993 if (inputsHaveBeenReshaped)
1994 {
1995 // Adjust the concatenation dimension by the amount of dimensions added (if any)
1996 concatDim += tensorDimensionsAdded;
1997
1998 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
1999 if (tensorDimensionsAdded == 1)
2000 {
2001 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2002 }
2003 else if (tensorDimensionsAdded == 2)
2004 {
2005 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2006 }
2007 }
2008
2009 // Check if permutations is required and get the pair of permutations required for the concatenation.
2010 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2011 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
2012 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
2013
2014 bool needPermute =
2015 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
2016
2017 if (needPermute)
2018 {
Mike Kelly4a956582020-02-28 10:32:09 +00002019 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
Mike Kellyb8805202019-07-31 17:25:43 +01002020 }
2021
2022 outputInfo.SetShape(outputShape);
2023
2024 // this is no-op for identity swizzles, otherwise it replaces both
2025 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002026 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002027 {
2028 return false;
2029 }
Mike Kellyb8805202019-07-31 17:25:43 +01002030
2031 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2032 armnn::OriginsDescriptor concatDescriptor;
2033
2034 try
2035 {
2036 // The concat descriptor is always created across the only supported concat dimension
2037 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2038 concatDescriptor =
2039 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
2040 }
Derek Lambertib9cb8442019-11-28 13:34:48 +00002041 catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002042 {
2043 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2044 }
2045
2046 // Validate the output shape is correct given the input shapes based on the
2047 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2048 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2049 {
2050 return Fail("%s: Error validating the output shape for concat", __func__);
2051 }
2052
2053 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2054 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
2055 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
2056
2057 bool isSupported = false;
2058 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2059 IsConcatSupported,
2060 data.m_Backends,
2061 isSupported,
2062 inputTensorInfos,
2063 outputInfo,
2064 concatDescriptor);
2065 if (!isSupported)
2066 {
2067 return false;
2068 }
2069
2070 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2071 assert(layer != nullptr);
2072 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2073
2074 // Connect inputs to the layer
2075 const int numInputSlots = layer->GetNumInputSlots();
2076 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2077 for (int i = 0; i < numInputSlots; ++i)
2078 {
2079 // connect the input directly to the merge (concat) layer
2080 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2081 }
2082
2083 if (needPermute)
2084 {
Mike Kelly4a956582020-02-28 10:32:09 +00002085 armnn::TransposeDescriptor transposeDesc;
2086 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002087 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2088 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2089 permutationPair.second);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002090
2091 bool isSupported = false;
2092 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002093 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002094 data.m_Backends,
2095 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002096 inputTransposeInfo,
2097 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002098 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002099 if (!isSupported)
2100 {
2101 return false;
2102 }
Mike Kellyb8805202019-07-31 17:25:43 +01002103 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Mike Kelly4a956582020-02-28 10:32:09 +00002104 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
2105 layer->GetOutputSlot(0),
2106 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002107 layer = &deswizzleLayer;
2108 }
2109
2110 if (inputsHaveBeenReshaped)
2111 {
2112 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2113
2114 // Undo the reshape knowing the amount of dimensions added
2115 if (tensorDimensionsAdded == 1)
2116 {
2117 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
2118 afterConcatInfo.GetShape()[2] }));
2119 }
2120 else if (tensorDimensionsAdded == 2)
2121 {
2122 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
2123 }
2124
Kevin Mayaed08ac2019-12-12 16:33:31 +00002125 armnn::ReshapeDescriptor reshapeDescriptor;
2126 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
2127
2128 bool isSupported = false;
2129 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2130 IsReshapeSupported,
2131 data.m_Backends,
2132 isSupported,
2133 layer->GetOutputSlot(0).GetTensorInfo(),
2134 afterConcatInfo,
2135 reshapeDescriptor);
2136 if (!isSupported)
2137 {
2138 return false;
2139 }
2140
Mike Kellyb8805202019-07-31 17:25:43 +01002141 layer = &AddReshapeLayer(
2142 *data.m_Network,
2143 layer->GetOutputSlot(0),
2144 afterConcatInfo
2145 );
2146 }
2147
2148 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2149}
2150
// Converts ANEURALNETWORKS_CONV_2D into an armnn Convolution2d layer plus an
// optional fused activation. Supports both the explicit-padding (10 input)
// and implicit-padding (7 input) HAL signatures. Weights and bias must be
// constant operands (non-constant weights are rejected).
// Returns false (via Fail) on invalid or unsupported inputs.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The output shape must be known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale,
    // as required for quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding signature: pad l/r/t/b, strides, fused activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding signature: padding scheme, strides, fused activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NOTE(review): spatial dims are read from fixed indices — weights [1]
        // (height) / [2] (width) and input [1]/[2] — which presumes NHWC input
        // and OHWI weights; confirm against the HAL CONV_2D operand layout.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (pass-through when none was requested).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2270
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002271template<typename HalPolicy,
2272 typename HalOperation = typename HalPolicy::Operation,
2273 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002274bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2275{
2276 using HalOperand = typename HalPolicy::Operand;
2277 using HalOperandType = typename HalPolicy::OperandType;
2278
2279 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2280 if (!input.IsValid() )
2281 {
2282 return Fail("%s: Operation has invalid inputs", __func__);
2283 }
2284
2285 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2286 unsigned int rank = inputInfo.GetNumDimensions();
2287 if (rank != 4)
2288 {
2289 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2290 }
2291
2292 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2293 if (!output)
2294 {
2295 return Fail("%s: Could not read output 0", __func__);
2296 }
2297
2298 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2299 if (IsDynamicTensor(outputInfo))
2300 {
2301 return Fail("%s: Dynamic output tensors are not supported", __func__);
2302 }
2303
2304 armnn::DepthToSpaceDescriptor descriptor;
2305
2306 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2307 if (descriptor.m_BlockSize <= 1)
2308 {
2309 return Fail("%s: Block size must be at least 1 in all dimensions");
2310 }
2311
2312 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002313 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002314 {
2315 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2316 }
2317
2318 bool isSupported = false;
2319 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2320 IsDepthToSpaceSupported,
2321 data.m_Backends,
2322 isSupported,
2323 inputInfo,
2324 outputInfo,
2325 descriptor);
2326 if (!isSupported)
2327 {
2328 return false;
2329 }
2330
2331 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2332 assert(layer != nullptr);
2333 input.Connect(layer->GetInputSlot(0));
2334
2335 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2336}
2337
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer.
//
// Handles both HAL forms of the operation:
//   - 11 inputs: explicit padding (left/right/top/bottom), strides, depth multiplier,
//     fused activation (depth multiplier at index 9 is implied by the weight reshape below);
//   - 8 inputs: implicit padding scheme, strides, fused activation, with the actual
//     padding computed here via CalcPadding.
// Returns true on success; otherwise logs via Fail() and returns false.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I (input channels) is taken from the input tensor; M (depth multiplier) is
    // recovered as (I * M) / I from the last weights dimension.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    // The permutation vector and overridden shape are applied while pinning the
    // constant weights, producing the layout ArmNN expects.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with (input scale * weights scale) if they
    // differ only by float rounding error.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding form: padding values are provided directly.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding form: derive padding from the padding scheme, the
        // (already permuted, MIHW) kernel size and the NHWC input size.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Query backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (may return startLayer itself when activation is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2482
Mike Kelly3c673942019-07-25 09:26:06 +01002483template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002484 typename HalOperation = typename HalPolicy::Operation,
2485 typename HalModel = typename HalPolicy::Model>
2486bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002487{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002488 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002489
2490 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2491 if (!input.IsValid())
2492 {
2493 return Fail("%s: Operation has invalid input", __func__);
2494 }
2495
Sadik Armagan98c0f662019-11-21 15:54:36 +00002496 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2497 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2498 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2499 {
2500 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2501 }
2502
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002503 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002504 if (!outputOperand)
2505 {
2506 return Fail("%s: Operation has invalid outputs", __func__);
2507 }
2508
2509 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2510 if (IsDynamicTensor(outputInfo))
2511 {
2512 return Fail("%s: Dynamic output tensors are not supported", __func__);
2513 }
2514
2515 bool isSupported = false;
2516 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2517 IsDequantizeSupported,
2518 data.m_Backends,
2519 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002520 inputInfo,
2521 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002522 if (!isSupported)
2523 {
2524 return false;
2525 }
2526
2527 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2528 assert(layer != nullptr);
2529 input.Connect(layer->GetInputSlot(0));
2530
2531 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2532}
2533
2534template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002535 typename HalOperation = typename HalPolicy::Operation,
2536 typename HalModel = typename HalPolicy::Model>
2537bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002538{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002539 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002540
2541 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2542 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2543
2544 if (!input0.IsValid() || !input1.IsValid())
2545 {
2546 return Fail("%s: Operation has invalid inputs", __func__);
2547 }
2548
2549 // The FuseActivation parameter is always the input index 2
2550 // and it should be optional
2551 ActivationFn activationFunction;
2552 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2553 {
2554 return Fail("%s: Operation has invalid inputs", __func__);
2555 }
2556
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002557 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002558 if (!output)
2559 {
2560 return Fail("%s: Could not read output 0", __func__);
2561 }
2562
2563 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2564 if (IsDynamicTensor(outputInfo))
2565 {
2566 return Fail("%s: Dynamic output tensors are not supported", __func__);
2567 }
2568
2569 bool isSupported = false;
2570 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2571 IsDivisionSupported,
2572 data.m_Backends,
2573 isSupported,
2574 input0.GetTensorInfo(),
2575 input1.GetTensorInfo(),
2576 outputInfo);
2577 if (!isSupported)
2578 {
2579 return false;
2580 }
2581
2582 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2583 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2584
2585 if (endLayer)
2586 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002587 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002588 if (!isReshapeSupported)
2589 {
2590 return false;
2591 }
2592
Mike Kelly46272802019-08-14 17:00:48 +01002593 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2594 }
2595 return Fail("%s: ProcessActivation failed", __func__);
2596}
2597
2598template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002599 typename HalOperation = typename HalPolicy::Operation,
2600 typename HalModel = typename HalPolicy::Model>
2601bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002602{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002603 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002604
2605 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2606 if (!input.IsValid())
2607 {
2608 return Fail("%s: Operation has invalid inputs", __func__);
2609 }
2610
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002611 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002612 if (!outputOperand)
2613 {
2614 return Fail("%s: Operation has invalid outputs", __func__);
2615 }
2616
2617 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2618 if (IsDynamicTensor(outputInfo))
2619 {
2620 return Fail("%s: Dynamic output tensors are not supported", __func__);
2621 }
2622
2623 bool isSupported = false;
2624 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2625 IsFloorSupported,
2626 data.m_Backends,
2627 isSupported,
2628 input.GetTensorInfo(),
2629 outputInfo);
2630 if (!isSupported)
2631 {
2632 return false;
2633 }
2634
2635 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2636 assert(layer != nullptr);
2637 input.Connect(layer->GetInputSlot(0));
2638
2639 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2640}
2641
// Overload set detecting whether an operand is an 8-bit symmetric quantized tensor
// (TENSOR_QUANT8_SYMM). The HAL 1.0 operand model has no such type, so that
// overload always returns false; the 1.2/1.3 overloads check the operand type.

inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2664
// Outcome of DequantizeIfRequired (see below).
enum class DequantizeStatus
{
    SUCCESS,          // Weights were dequantized into a freshly allocated float buffer
    NOT_REQUIRED,     // Weights are already constant, or no DEQUANTIZE producer was usable
    INVALID_OPERAND   // The weights operand could not be read at all
};

// (dequantized float buffer, buffer size in bytes, tensor info describing the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2673
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002674template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002675 typename HalOperation = typename HalPolicy::Operation,
2676 typename HalModel = typename HalPolicy::Model>
2677DequantizeResult DequantizeIfRequired(size_t operand_index,
2678 const HalOperation& operation,
2679 const HalModel& model,
2680 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002681{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002682 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002683
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002684 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002685 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002686 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002687 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002688 }
2689
2690 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2691 {
2692 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002693 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002694 }
2695
2696 const size_t weightsInputIndex = operation.inputs[operand_index];
2697
2698 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2699 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002700 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002701 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002702 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002703 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002704 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2705 {
2706 continue;
2707 }
2708
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002709 size_t outOpIndex = weightsInputIndex + 1;
2710 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002711 {
2712 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002713 }
2714
2715 if (outOpIndex != weightsInputIndex)
2716 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002717 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002718 }
2719
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002720 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002721 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002722
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002723 if (!IsQSymm8(*operand))
2724 {
2725 // Only supporting dequantize from QSYMM8 to FLOAT
2726 break;
2727 }
2728
2729 // Allocate a new buffer for the dequantized data and manually dequantize
2730 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2731 if (!startValue)
2732 {
2733 // Failed to get the operand address
2734 break;
2735 }
2736
2737 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2738 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002739 const float quantizationScale = operand->scale;
2740
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002741 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2742 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2743 {
2744 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002745 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002746 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2747 }
2748
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002749 // Construct tensor info for dequantized ConstTensor
2750 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2751 operand->dimensions.data(),
2752 armnn::DataType::Float32);
2753
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002754 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2755 std::move(tensorInfo),
2756 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002757 }
2758
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002759 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002760}
2761
2762template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002763 typename HalOperation = typename HalPolicy::Operation,
2764 typename HalModel = typename HalPolicy::Model>
2765ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2766 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002767 const ConversionData& data,
2768 size_t operandIndex,
2769 bool optional = false)
2770{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002771 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2772
2773 DequantizeStatus status = std::get<3>(dequantized);
2774 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002775 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002776 case DequantizeStatus::INVALID_OPERAND:
2777 {
2778 // return invalid const tensor pin
2779 return ConstTensorPin();
2780 }
2781 case DequantizeStatus::NOT_REQUIRED:
2782 {
2783 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2784 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2785 }
2786 case DequantizeStatus::SUCCESS:
2787 default:
2788 {
2789 return ConstTensorPin(
2790 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2791 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002792 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002793}
2794
2795
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
//
// Weights may either be constant or come from a DEQUANTIZE op (handled by
// DequantizeAndMakeConstTensorPin). Inputs of rank > 2 are flattened to 2D via an
// inserted Reshape layer, matching NNAPI FULLY_CONNECTED semantics. A fused
// activation (input index 3) is appended after the layer. Returns true on success;
// otherwise logs via Fail() and returns false.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights (input 1) may require dequantization first; bias (input 2) is constant.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2D shape the input must be flattened to; throws if the input
    // cannot be reconciled with the weights shape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    // NOTE(review): transpose is enabled because NNAPI supplies weights as
    // [num_units, input_size] - confirm against the HAL spec if this changes.
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    // Query backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Input rank > 2: insert a Reshape layer to flatten it to the 2D shape
            // computed above before feeding the FullyConnected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            // Already 2D (or 1D): connect the input directly.
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2914
2915template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002916 typename HalOperation = typename HalPolicy::Operation,
2917 typename HalModel = typename HalPolicy::Model>
2918bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002919{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002920 using HalOperand = typename HalPolicy::Operand;
2921
Mike Kelly999e2092019-08-15 10:46:46 +01002922 if (operation.inputs.size() != 1)
2923 {
2924 return Fail("%s: Optional inputs are not supported", __func__);
2925 }
2926
Mike Kelly46272802019-08-14 17:00:48 +01002927 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2928 if (!input.IsValid())
2929 {
2930 return Fail("%s: Operation has invalid inputs", __func__);
2931 }
2932
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002933 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002934 if (!output)
2935 {
2936 return Fail("%s: Could not read output 0", __func__);
2937 }
2938
2939 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2940 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2941
2942 if (IsDynamicTensor(outputInfo))
2943 {
2944 return Fail("%s: Dynamic output tensors are not supported", __func__);
2945 }
2946 if (outputInfo.GetNumDimensions() != 4u)
2947 {
2948 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2949 }
2950
2951 armnn::L2NormalizationDescriptor desc;
2952 desc.m_DataLayout = armnn::DataLayout::NHWC;
2953
2954 bool isSupported = false;
2955 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2956 IsL2NormalizationSupported,
2957 data.m_Backends,
2958 isSupported,
2959 inputInfo,
2960 outputInfo,
2961 desc);
2962 if (!isSupported)
2963 {
2964 return false;
2965 }
2966
2967 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2968 assert(layer != nullptr);
2969 input.Connect(layer->GetInputSlot(0));
2970
2971 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2972}
2973
2974template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002975 typename HalOperation = typename HalPolicy::Operation,
2976 typename HalModel = typename HalPolicy::Model>
2977bool ConvertLocalResponseNormalization(const HalOperation& operation,
2978 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01002979 ConversionData& data)
2980{
Mike Kelly999e2092019-08-15 10:46:46 +01002981 if (operation.inputs.size() != 5)
2982 {
2983 return Fail("%s: Optional inputs are not supported", __func__);
2984 }
2985
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002986 using HalOperand = typename HalPolicy::Operand;
2987 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01002988
2989 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2990 if (!input.IsValid())
2991 {
2992 return Fail("%s: Operation has invalid inputs", __func__);
2993 }
2994
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002995 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002996 if (!output)
2997 {
2998 return Fail("%s: Could not read output 0", __func__);
2999 }
3000
3001 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3002 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3003
3004 if (IsDynamicTensor(outputInfo))
3005 {
3006 return Fail("%s: Dynamic output tensors are not supported", __func__);
3007 }
3008 if (outputInfo.GetNumDimensions() != 4u)
3009 {
3010 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3011 }
3012
3013 armnn::NormalizationDescriptor descriptor;
3014 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3015 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3016 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3017
3018 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003019 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003020 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3021 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3022 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3023 {
3024 return Fail("%s: Operation has invalid inputs", __func__);
3025 }
3026
3027 // ArmNN expects normSize to be the full size of the normalization
3028 // window rather than the radius as in AndroidNN.
3029 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3030
3031 bool isSupported = false;
3032 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3033 IsNormalizationSupported,
3034 data.m_Backends,
3035 isSupported,
3036 inputInfo,
3037 outputInfo,
3038 descriptor);
3039 if (!isSupported)
3040 {
3041 return false;
3042 }
3043
3044
3045 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3046 assert(layer != nullptr);
3047 input.Connect(layer->GetInputSlot(0));
3048
3049 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3050}
3051
3052template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003053 typename HalOperation = typename HalPolicy::Operation,
3054 typename HalModel = typename HalPolicy::Model>
3055bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003056{
Mike Kelly46272802019-08-14 17:00:48 +01003057 armnn::ActivationDescriptor desc;
3058 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3059
3060 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3061}
3062
3063template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003064 typename HalOperation = typename HalPolicy::Operation,
3065 typename HalModel = typename HalPolicy::Model>
3066bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003067{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003068 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003069
3070 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3071 if (!input.IsValid())
3072 {
3073 return Fail("%s: Operation has invalid inputs", __func__);
3074 }
3075
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003076 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003077 if (!output)
3078 {
3079 return Fail("%s: Could not read output 0", __func__);
3080 }
3081
3082 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3083 if (IsDynamicTensor(outputInfo))
3084 {
3085 return Fail("%s: Dynamic output tensors are not supported", __func__);
3086 }
3087
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003088 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003089 if (!axisOperand)
3090 {
3091 return Fail("%s: Could not read input 1", __func__);
3092 }
3093
3094 std::vector<int32_t> axis;
3095 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3096 {
3097 return Fail("%s: Input 1 has invalid values", __func__);
3098 }
3099
3100 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3101
3102 // Convert the axis to unsigned int and remove duplicates.
3103 unsigned int rank = inputInfo.GetNumDimensions();
3104 std::set<unsigned int> uniqueAxis;
3105 std::transform(axis.begin(), axis.end(),
3106 std::inserter(uniqueAxis, uniqueAxis.begin()),
3107 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3108
3109 // Get the "keep dims" flag.
3110 int32_t keepDims = 0;
3111 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3112 {
3113 return Fail("%s: Could not read input 2", __func__);
3114 }
3115
3116 armnn::MeanDescriptor descriptor;
3117 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3118 descriptor.m_KeepDims = keepDims > 0;
3119
3120 bool isSupported = false;
3121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3122 IsMeanSupported,
3123 data.m_Backends,
3124 isSupported,
3125 inputInfo,
3126 outputInfo,
3127 descriptor);
3128 if (!isSupported)
3129 {
3130 return false;
3131 }
3132
3133 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3134 assert(layer != nullptr);
3135 input.Connect(layer->GetInputSlot(0));
3136
3137 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3138}
3139
3140template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003141 typename HalOperation = typename HalPolicy::Operation,
3142 typename HalModel = typename HalPolicy::Model>
3143bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003144{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003145 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003146
3147 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3148 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3149
3150 if (!input0.IsValid() || !input1.IsValid())
3151 {
3152 return Fail("%s: Operation has invalid inputs", __func__);
3153 }
3154
3155 // The FuseActivation parameter is always the input index 2
3156 // and it should be optional
3157 ActivationFn activationFunction;
3158 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3159 {
3160 return Fail("%s: Operation has invalid inputs", __func__);
3161 }
3162
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003163 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003164
3165 if (outputOperand == nullptr)
3166 {
3167 return false;
3168 }
3169
3170 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3171 if (IsDynamicTensor(outputInfo))
3172 {
3173 return Fail("%s: Dynamic output tensors are not supported", __func__);
3174 }
3175
3176 bool isSupported = false;
3177 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3178 IsMultiplicationSupported,
3179 data.m_Backends,
3180 isSupported,
3181 input0.GetTensorInfo(),
3182 input1.GetTensorInfo(),
3183 outputInfo);
3184 if (!isSupported)
3185 {
3186 return false;
3187 }
3188
3189 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3190 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3191
3192 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3193 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3194
3195 if (endLayer != nullptr)
3196 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003197 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003198 if (!isReshapeSupported)
3199 {
3200 return false;
3201 }
3202
Mike Kelly46272802019-08-14 17:00:48 +01003203 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3204 }
3205 else
3206 {
3207 return Fail("%s: ProcessActivation failed", __func__);
3208 }
3209}
3210
3211template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003212 typename HalOperation = typename HalPolicy::Operation,
3213 typename HalModel = typename HalPolicy::Model>
3214bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003215{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003216 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003217
Mike Kelly3c673942019-07-25 09:26:06 +01003218 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3219 if (!input.IsValid())
3220 {
3221 return Fail("%s: Operation has invalid inputs", __func__);
3222 }
3223
3224 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3225 unsigned int rank = inputInfo.GetNumDimensions();
3226
3227 armnn::PadDescriptor descriptor;
3228 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3229 {
3230 return Fail("%s: Could not convert paddings", __func__);
3231 }
3232
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003233 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3234 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003235 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3236 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3237 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003238 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003239 {
3240 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3241 }
3242
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003243 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003244 if (!output)
3245 {
3246 return Fail("%s: Could not read output", __func__);
3247 }
3248
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003249 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003250 if (IsDynamicTensor(outputInfo))
3251 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003252 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003253 }
3254
3255 bool isSupported = false;
3256 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3257 IsPadSupported,
3258 data.m_Backends,
3259 isSupported,
3260 inputInfo,
3261 outputInfo,
3262 descriptor);
3263 if (!isSupported)
3264 {
3265 return false;
3266 }
3267
3268 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3269 assert(layer != nullptr);
3270 input.Connect(layer->GetInputSlot(0));
3271 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3272
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003273 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003274}
3275
Mike Kelly0a879362019-07-29 16:56:31 +01003276template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003277 typename HalOperation = typename HalPolicy::Operation,
3278 typename HalModel = typename HalPolicy::Model>
3279bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003280{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003281 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003282
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003283 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3284 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3285 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003286
3287 if (inputOperand == nullptr
3288 || requestedShapeOperand == nullptr
3289 || outputOperand == nullptr)
3290 {
3291 return Fail("%s: Operation has invalid inputs", __func__);
3292 }
3293
3294 if (requestedShapeOperand->dimensions.size() != 1)
3295 {
3296 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3297 __func__, requestedShapeOperand->dimensions.size());
3298 }
3299
3300 std::vector<int32_t> targetDimensions;
3301 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3302 {
3303 return Fail("%s: Could not read values of input 1", __func__);
3304 }
3305
3306 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3307
3308 Shape requestedShape;
3309 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3310 // function that resolves these values into a fully specified tensor shape.
3311 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3312 {
3313 return Fail("%s: Failed to resolve the requested shape", __func__);
3314 }
3315
3316 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3317 if (!SameShape(requestedShape, outputOperandShape))
3318 {
3319 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3320 }
3321
3322 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3323 if (!input.IsValid())
3324 {
3325 return Fail("%s: Could not read input 0", __func__);
3326 }
3327
3328 armnn::ReshapeDescriptor reshapeDescriptor;
3329 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3330 requestedShape.dimensions.data());
3331
3332 bool isSupported = false;
3333 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3334 IsReshapeSupported,
3335 data.m_Backends,
3336 isSupported,
3337 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003338 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003339 reshapeDescriptor);
3340 if (!isSupported)
3341 {
3342 return false;
3343 }
3344
3345 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3346 assert(layer != nullptr);
3347 input.Connect(layer->GetInputSlot(0));
3348
3349 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3350}
3351
3352template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003353 typename HalOperation = typename HalPolicy::Operation,
3354 typename HalModel = typename HalPolicy::Model>
3355bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003356{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003357 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003358
Mike Kelly0a879362019-07-29 16:56:31 +01003359 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3360 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3361
3362 if (!input0.IsValid() || !input1.IsValid())
3363 {
3364 return Fail("%s: Operation has invalid inputs", __func__);
3365 }
3366
3367 // The FuseActivation parameter is always the input index 2
3368 // and it should be optional
3369 ActivationFn activationFunction;
3370 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3371 {
3372 return Fail("%s: Operation has invalid inputs", __func__);
3373 }
3374
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003375 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003376 if (!output)
3377 {
3378 return Fail("%s: Could not read output 0", __func__);
3379 }
3380
3381 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3382 if (IsDynamicTensor(outputInfo))
3383 {
3384 return Fail("%s: Dynamic output tensors are not supported", __func__);
3385 }
3386
3387 bool isSupported = false;
3388 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3389 IsSubtractionSupported,
3390 data.m_Backends,
3391 isSupported,
3392 input0.GetTensorInfo(),
3393 input1.GetTensorInfo(),
3394 outputInfo);
3395 if (!isSupported)
3396 {
3397 return false;
3398 }
3399
3400 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3401 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3402
3403 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3404 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3405
3406 if (endLayer)
3407 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003408 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003409 if (!isReshapeSupported)
3410 {
3411 return false;
3412 }
Mike Kelly0a879362019-07-29 16:56:31 +01003413 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3414 }
3415
3416 return Fail("%s: ProcessActivation failed", __func__);
3417}
3418
Finn Williams23b87b32019-07-30 11:44:05 +01003419template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003420 typename HalOperation = typename HalPolicy::Operation,
3421 typename HalModel = typename HalPolicy::Model>
3422bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003423{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003424 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003425
3426 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3427 if (!input.IsValid())
3428 {
3429 return Fail("%s: Operation has invalid inputs", __func__);
3430 }
3431
3432 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3433 unsigned int rank = inputInfo.GetNumDimensions();
3434 if (rank > 4)
3435 {
3436 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3437 }
3438
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003439 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003440 if (!output)
3441 {
3442 return Fail("%s: Could not read output 0", __func__);
3443 }
3444
3445 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3446 {
3447 return Fail("%s: Dynamic output tensors are not supported", __func__);
3448 }
3449
3450 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3451 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003452 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003453
3454 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3455
3456 std::vector<int32_t> axis;
3457 if (!axisOperand)
3458 {
3459 axis.assign(dimensionSequence,
3460 dimensionSequence + rank);
3461 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003462 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003463 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003464 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003465 }
3466
3467 std::vector<uint32_t> outputDims;
3468 for (unsigned int i = 0; i < rank; i++)
3469 {
3470 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3471 auto currentDimension = inputInfo.GetShape()[i];
3472 if (skipSqueeze || currentDimension != 1)
3473 {
3474 outputDims.push_back(currentDimension);
3475 }
3476 }
3477
3478 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3479
3480 armnn::TensorInfo outputInfo = inputInfo;
3481 outputInfo.SetShape(outShape);
3482
3483 armnn::ReshapeDescriptor reshapeDesc;
3484 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3485
3486 bool isSupported = false;
3487 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3488 IsReshapeSupported,
3489 data.m_Backends,
3490 isSupported,
3491 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003492 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003493 reshapeDesc);
3494 if (!isSupported)
3495 {
3496 return false;
3497 }
3498
3499 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3500 assert(layer != nullptr);
3501 input.Connect(layer->GetInputSlot(0));
3502
3503 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3504}
3505
3506template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003507 typename HalOperation = typename HalPolicy::Operation,
3508 typename HalModel = typename HalPolicy::Model>
3509bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003510{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003511 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003512
3513 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3514 if (!input.IsValid())
3515 {
3516 return Fail("%s: Operation has invalid inputs", __func__);
3517 }
3518
3519 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3520 unsigned int rank = inputInfo.GetNumDimensions();
3521 if (rank > 4)
3522 {
3523 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3524 }
3525
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003526 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003527 if (!output)
3528 {
3529 return Fail("%s: Could not read output 0", __func__);
3530 }
3531
3532 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3533 if (IsDynamicTensor(outputInfo))
3534 {
3535 return Fail("%s: Dynamic output tensors are not supported", __func__);
3536 }
3537
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003538 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3539 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3540 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003541
3542 std::vector<int32_t> beginValues;
3543 std::vector<int32_t> endValues;
3544 std::vector<int32_t> stridesValues;
3545
3546 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003547 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003548 {
3549 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3550 {
3551 return false;
3552 }
3553
3554 if (operandValues.size() != rank)
3555 {
3556 return false;
3557 }
3558
3559 return true;
3560 };
3561
3562 if (!ValidateInputOperands(*beginOperand, beginValues)
3563 || !ValidateInputOperands(*endOperand, endValues)
3564 || !ValidateInputOperands(*stridesOperand, stridesValues))
3565 {
3566 return Fail("%s: Operation has invalid input operand", __func__);
3567 }
3568
3569 // Stride cannot have value '0'
3570 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3571 {
3572 return Fail("%s: Stride must be non-zero value.", __func__);
3573 }
3574
3575 armnn::StridedSliceDescriptor descriptor;
3576 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3577 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3578 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3579 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3580
3581 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3582 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3583 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3584 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3585 {
3586 return Fail("%s: Operation has invalid inputs", __func__);
3587 }
3588
3589 bool isSupported = false;
3590 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3591 IsStridedSliceSupported,
3592 data.m_Backends,
3593 isSupported,
3594 inputInfo,
3595 outputInfo,
3596 descriptor);
3597 if (!isSupported)
3598 {
3599 return false;
3600 }
3601
3602 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3603 assert(layer != nullptr);
3604 input.Connect(layer->GetInputSlot(0));
3605
3606 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3607}
3608
3609template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003610 typename HalOperation = typename HalPolicy::Operation,
3611 typename HalModel = typename HalPolicy::Model>
3612bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003613{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003614 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003615
3616 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3617 if (!input.IsValid())
3618 {
3619 return Fail("%s: Operation has invalid inputs", __func__);
3620 }
3621
3622 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3623 unsigned int rank = inputInfo.GetNumDimensions();
3624 if (rank > 4)
3625 {
3626 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3627 }
3628
3629 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3630 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003631 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003632
3633 std::vector<int32_t> perm(rank);
3634 if (!permOperand)
3635 {
3636 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3637 for (unsigned int i = rank; i > 0; i--)
3638 {
3639 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3640 }
3641 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003642 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003643 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003644 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003645 }
3646
3647 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3648
Mike Kelly4a956582020-02-28 10:32:09 +00003649 armnn::TransposeDescriptor transposeDesc;
3650 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003651
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003652 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003653 if (!output)
3654 {
3655 return Fail("%s: Could not read output 0", __func__);
3656 }
3657
3658 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003659 if (IsDynamicTensor(outputInfo))
3660 {
3661 return Fail("%s: Dynamic output tensors are not supported", __func__);
3662 }
3663
Mike Kelly46272802019-08-14 17:00:48 +01003664
3665 bool isSupported = false;
3666 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003667 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003668 data.m_Backends,
3669 isSupported,
3670 inputInfo,
3671 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003672 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003673 if (!isSupported)
3674 {
3675 return false;
3676 }
3677
Mike Kelly4a956582020-02-28 10:32:09 +00003678 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003679 assert(layer != nullptr);
3680 input.Connect(layer->GetInputSlot(0));
3681
3682 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3683}
3684
3685template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003686 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003687 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003688 typename HalModel = typename HalPolicy::Model>
3689bool ConvertBatchToSpaceNd(const HalOperation& operation,
3690 const HalModel& model,
3691 ConversionData& data)
3692{
Finn Williams23b87b32019-07-30 11:44:05 +01003693
3694 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3695 if (!input.IsValid())
3696 {
3697 return Fail("%s: Operation has invalid inputs", __func__);
3698 }
3699
3700 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3701 if (!output)
3702 {
3703 return Fail("%s: Could not read output 0", __func__);
3704 }
3705
3706 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3707 if (IsDynamicTensor(outputInfo))
3708 {
3709 return Fail("%s: Dynamic output tensors are not supported", __func__);
3710 }
3711
3712 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3713 if (!blockOperand)
3714 {
3715 return Fail("%s: Could not read input 1", __func__);
3716 }
3717
3718 // Convert the block operand to int32
3719 std::vector<int32_t> block;
3720 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3721 {
3722 return Fail("%s: Input 1 has invalid values", __func__);
3723 }
3724
3725 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3726
3727 unsigned int rank = inputInfo.GetNumDimensions();
3728 if (rank != 4)
3729 {
3730 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3731 }
3732
3733 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3734 {
3735 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3736 " greater than or equal to 1", __func__);
3737 }
3738
3739 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3740 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3741 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3742
Kevin May42477c12020-03-26 13:34:14 +00003743 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01003744 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003745 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003746 }
3747 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3748 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3749
3750 bool isSupported = false;
3751 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3752 IsBatchToSpaceNdSupported,
3753 data.m_Backends,
3754 isSupported,
3755 inputInfo,
3756 outputInfo,
3757 batchToSpaceNdDesc);
3758 if (!isSupported)
3759 {
3760 return false;
3761 }
3762
3763 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3764 assert(layer != nullptr);
3765 input.Connect(layer->GetInputSlot(0));
3766
3767 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3768}
Mike Kelly0a879362019-07-29 16:56:31 +01003769
Finn Williamsd74c5052019-07-30 17:06:00 +01003770template<typename HalPolicy,
3771 typename HalOperation = typename HalPolicy::Operation,
3772 typename HalOperand = typename HalPolicy::Operand,
3773 typename HalModel = typename HalPolicy::Model>
3774bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3775{
3776 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3777 if (!input.IsValid())
3778 {
3779 return Fail("%s: Operation has invalid inputs", __func__);
3780 }
3781
3782 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3783 unsigned int rank = inputInfo.GetNumDimensions();
3784 unsigned int spatialDim = rank - 2;
3785
3786 if (rank != 4)
3787 {
3788 Fail("%s: Only inputs with rank 4 are supported", __func__);
3789 }
3790
3791 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3792 if (!output)
3793 {
3794 return Fail("%s: Could not read output 0", __func__);
3795 }
3796
3797 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3798 if (IsDynamicTensor(outputInfo))
3799 {
3800 return Fail("%s: Dynamic output tensors are not supported", __func__);
3801 }
3802
3803 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3804 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3805
3806 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3807 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3808 {
3809 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3810 }
3811
3812 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003813 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3814 {
3815 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3816 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003817 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3818 {
3819 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3820 }
3821
3822 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3823 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3824 {
3825 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3826 }
3827
3828 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3829 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003830 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3831 {
3832 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3833 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003834 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3835 {
3836 int paddingBeforeInput = paddings[i];
3837 int paddingAfterInput = paddings[i + 1];
3838 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3839 {
3840 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3841 }
3842
3843 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3844 }
3845
3846 armnn::SpaceToBatchNdDescriptor descriptor;
3847 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3848 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3849 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3850
Kevin May42477c12020-03-26 13:34:14 +00003851 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01003852 {
3853 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3854 }
3855
3856 bool isSupported = false;
3857 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3858 IsSpaceToBatchNdSupported,
3859 data.m_Backends,
3860 isSupported,
3861 inputInfo,
3862 outputInfo,
3863 descriptor);
3864 if (!isSupported)
3865 {
3866 return false;
3867 }
3868
3869 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3870 assert(layer != nullptr);
3871 input.Connect(layer->GetInputSlot(0));
3872
3873 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3874}
3875
saoste01b8471482018-10-10 09:44:51 +01003876} // namespace armnn_driver