blob: d19131ce904a25d532d1db77a2548db07c76f0c8 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010015
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000016#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000017#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010018
Mike Kelly46272802019-08-14 17:00:48 +010019#include "1.0/FullyConnected.hpp"
20
arovir01b0717b52018-09-05 17:03:25 +010021#include <ActivationFunctor.h>
22#include <CpuExecutor.h>
23#include <OperationsUtils.h>
24
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
/// State shared by the operation converters while a HAL model is being
/// translated into an ArmNN network.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    {}

    // Backends to query for layer support, in preference order.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network being built (starts out null/empty).
    armnn::INetworkPtr m_Network;
    // Output slot producing each operand's value, indexed by operand index.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
/// Handle pairing an ArmNN output slot with its tensor info; represents a
/// (possibly invalid) input to a layer under construction.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// @return whether this handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    /// @return the tensor info captured at construction time.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
72
/// Wraps an armnn::ConstTensor together with (optionally) owned storage for
/// swizzled tensor data. An instance may be invalid, and an invalid instance
/// may additionally be flagged as "optional" (a missing-but-allowed input).
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (may own the swizzled data buffer); movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
107
108} // namespace armnn_driver
109
110///
111/// Utility functions
112///
113
114namespace
115{
116
117using namespace armnn_driver;
118using namespace android::nn;
119
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Forward the printf-style arguments straight to the Android debug log.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
128
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order and sets 'supported' to true on the first
// backend whose ILayerSupport object accepts the layer. When a backend rejects
// the layer the backend-provided reason (if any) is logged via ALOGD, and one
// final message is logged if no backend supports it. Any InvalidArgumentException
// thrown by the support check is re-thrown with added context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100172
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000173template<typename HalOperand>
174armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100175{
176 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
177}
178
Matthew Bentham912b3622019-05-03 15:49:14 +0100179inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100180{
Matthew Bentham912b3622019-05-03 15:49:14 +0100181 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
182 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
183 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100184}
185
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_BOOL8 ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif
203
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
// (adds TENSOR_QUANT8_ASYMM_SIGNED on top of the 1.2 set).
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL ||
           type == V1_3::OperandType::TENSOR_BOOL8 ||
           type == V1_3::OperandType::TENSOR_FLOAT16 ||
           type == V1_3::OperandType::TENSOR_FLOAT32 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif
222
// HAL 1.0 has no BOOL operand type, so this overload always reports false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
227
// A V1_0 operand can never originate from a 1.2-or-later model.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
232
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// True when the 1.2 operand carries a scalar BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif
247
#ifdef ARMNN_ANDROID_NN_V1_3

// True when the 1.3 operand carries a scalar BOOL.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
262
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100263template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000264armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
265 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100266 armnn::TensorInfo reshapeInfo)
267{
268 armnn::ReshapeDescriptor reshapeDescriptor;
269 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
270
271 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100272 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273
274 // Attach the input layer to the reshape layer
275 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
276 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
277
278 return *reshapeLayer;
279}
280
// Connects two inputs (of possibly different rank) to the two input slots of
// 'startLayer'. If the ranks differ, the lower-rank input is first passed
// through a Reshape layer that left-pads its shape with degenerate (size 1)
// dimensions so both inputs have the same rank.
// @return false if the required reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Left-pad the smaller shape with 1s so both shapes end up with the same rank.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
373
// Computes explicit head/tail padding for one spatial dimension from an
// Android NN implicit padding scheme.
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // NNAPI yields signed padding; narrow to the unsigned outputs with a checked cast.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
387
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Overload of CalcPadding that also accounts for kernel dilation (HAL 1.2+).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Padding for transpose convolution is derived from the *output* size rather
// than the input size; signed padding values are passed through unchanged.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
407
Matthew Bentham912b3622019-05-03 15:49:14 +0100408Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100409{
410 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100411 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100412 shape.dimensions = operand.dimensions;
413 shape.scale = operand.scale;
414 shape.offset = operand.zeroPoint;
415 return shape;
416}
417
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Builds an android::nn::Shape describing a HAL 1.2 operand.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
431
#ifdef ARMNN_ANDROID_NN_V1_3

// Builds an android::nn::Shape describing a HAL 1.3 operand.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
445
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Tolerate up to 1% relative mismatch; within tolerance the bias scale
            // is overwritten with the expected value (and a warning is logged).
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
485
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used for concat axis remapping).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
// RotateTensorLeft/RotateTensorRight are inverses of each other.
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100493
494template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000495armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
496 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100497{
498 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000499 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100500
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100501 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100502
503 // Connect input to swizzle layer
504 input.Connect(layer->GetInputSlot(0));
505
506 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000507 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100508 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
509
510 return *layer;
511}
512
arovir01b0717b52018-09-05 17:03:25 +0100513bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
514 const armnn::TensorShape & outputShape,
515 uint32_t concatDim)
516{
517 // Validate the output shape is correct given the input shapes (which have just been validated)
518 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
519 if (outputShape.GetNumDimensions() != numDimensions)
520 {
521 return Fail("%s: Output shape has wrong number of dimensions", __func__);
522 }
523
524 unsigned int outputSizeAlongConcatenatedDimension = 0;
525 for (unsigned int i = 0; i < inputShapes.size(); i++)
526 {
527 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
528 }
529
530 for (unsigned int i = 0; i < numDimensions; ++i)
531 {
532 if (i == concatDim)
533 {
534 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
535 {
536 return Fail(
537 "%s: Invalid output shape for dimension %d (%d != %d)",
538 __func__,
539 i,
540 outputShape[i],
541 outputSizeAlongConcatenatedDimension);
542 }
543 }
544 else
545 {
546 if (outputShape[i] != inputShapes[0][i])
547 {
548 return Fail("%s: Invalid output shape", __func__);
549 }
550 }
551 }
552
553 return true;
554}
555
556bool RequiresReshape(armnn::TensorShape & inputShape)
557{
558 return inputShape.GetNumDimensions() < 3;
559}
560
// Applies 'mapping' to every input by inserting a transpose layer in front of
// it, replacing each handle and its shape with the transposed versions.
// A no-op when 'mapping' is the 4-D identity permutation.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
581
// Checks that every required transpose is supported by at least one backend,
// then applies it to all inputs via SwizzleInputs. Returns false (without
// modifying any input) if any transpose is unsupported; a no-op returning true
// when 'mapping' is the 4-D identity permutation.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
616
617
narpra01f176d5a2018-11-18 20:17:48 +0000618bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
619 int32_t & concatDimension,
620 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100621{
narpra01f176d5a2018-11-18 20:17:48 +0000622 bool needPermute = false;
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100623 ARMNN_ASSERT(numberOfDimensions >= 3);
arovir01b0717b52018-09-05 17:03:25 +0100624
625 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000626 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
627 // or along dimension 0 or 2 for a 3-D tensor.
628 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100629 {
narpra01f176d5a2018-11-18 20:17:48 +0000630 concatDimension = 1;
631 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
632 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100633 }
narpra01f176d5a2018-11-18 20:17:48 +0000634 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100635 {
narpra01f176d5a2018-11-18 20:17:48 +0000636 concatDimension = 0;
637 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
638 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100639 }
narpra01f176d5a2018-11-18 20:17:48 +0000640 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100641}
642
643} // anonymous namespace
644
645namespace armnn_driver
646{
647
//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @return The end layer of the sequence of layers built for the given AndroidNN
////         activation function or nullptr if an error occurred (e.g. unsupported activation).
////         Note that the end layer matches the input layer if no activation is required
////         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
658
659} // namespace armnn_driver
660
661///
662/// Utility templates
663///
664
665namespace armnn_driver
666{
667
668using namespace android::nn;
669
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100670template<typename HalPolicy,
671 typename HalOperand = typename HalPolicy::Operand,
672 typename HalOperation = typename HalPolicy::Operation,
673 typename HalModel = typename HalPolicy::Model>
674const HalOperand* GetInputOperand(const HalOperation& operation,
675 uint32_t inputIndex,
676 const HalModel& model,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100677 bool failOnIndexOutOfBounds = true)
arovir01b0717b52018-09-05 17:03:25 +0100678{
679 if (inputIndex >= operation.inputs.size())
680 {
saoste01b8471482018-10-10 09:44:51 +0100681 if (failOnIndexOutOfBounds)
682 {
683 Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
684 }
arovir01b0717b52018-09-05 17:03:25 +0100685 return nullptr;
686 }
687
Kevin May42477c12020-03-26 13:34:14 +0000688 // Model should have been validated beforehand
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100689 ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
Kevin May42477c12020-03-26 13:34:14 +0000690 return &getMainModel(model).operands[operation.inputs[inputIndex]];
arovir01b0717b52018-09-05 17:03:25 +0100691}
692
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100693template<typename HalPolicy,
694 typename HalOperand = typename HalPolicy::Operand,
695 typename HalOperation = typename HalPolicy::Operation,
696 typename HalModel = typename HalPolicy::Model>
697const HalOperand* GetOutputOperand(const HalOperation& operation,
698 uint32_t outputIndex,
699 const HalModel& model)
arovir01b0717b52018-09-05 17:03:25 +0100700{
701 if (outputIndex >= operation.outputs.size())
702 {
703 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
704 return nullptr;
705 }
706
707 // Model should have been validated beforehand
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100708 ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
arovir01b0717b52018-09-05 17:03:25 +0100709
Kevin May42477c12020-03-26 13:34:14 +0000710 return &getMainModel(model).operands[operation.outputs[outputIndex]];
arovir01b0717b52018-09-05 17:03:25 +0100711}
712
// Returns a read-only pointer to the constant data backing 'operand', or
// nullptr when no data is available. With 'optional' set, a NO_VALUE operand
// returns nullptr silently; otherwise an unsupported lifetime is logged.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A non-optional operand with NO_VALUE is treated as an error below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
759
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100760template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100761 typename HalOperation = typename HalPolicy::Operation,
762 typename HalModel = typename HalPolicy::Model,
763 typename HalOperandType = typename HalPolicy::OperandType>
764bool GetOperandType(const HalOperation& operation,
765 uint32_t inputIndex,
766 const HalModel& model,
767 HalOperandType& type)
768{
769 using HalOperand = typename HalPolicy::Operand;
770
771 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
772 if (!operand)
773 {
774 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
775 }
776
777 type = operand->type;
778 return true;
779}
780
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    // NO_VALUE is deliberately counted as "constant" so that optional operands
    // with no data take the constant-tensor path in callers.
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
793
794template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100795 typename HalOperand = typename HalPolicy::Operand,
796 typename HalModel = typename HalPolicy::Model>
797ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
798 const HalModel& model,
799 const ConversionData& data,
800 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
801 const armnn::TensorShape* overrideTensorShape = nullptr,
802 bool optional = false)
803{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100804 if (!IsOperandTypeSupportedForTensors(operand.type))
805 {
806 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
807 return ConstTensorPin();
808 }
809
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000810 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100811 {
812 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
813 return ConstTensorPin();
814 }
815
816 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
817 if (!valueStart)
818 {
819 if (optional)
820 {
821 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
822 return ConstTensorPin(true);
823 }
824 // mandatory tensor with no values
825 Fail("%s: failed to get operand address", __func__);
826 return ConstTensorPin();
827 }
828
829 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Teresa Charlin02dce092019-11-11 17:06:23 +0000830 // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
831 if (tensorInfo.HasPerAxisQuantization())
832 {
833 tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
834 }
835
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100836 if (overrideTensorShape != nullptr)
837 {
838 tensorInfo.SetShape(*overrideTensorShape);
839 }
840 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
841}
842
843template<typename HalPolicy,
844 typename HalOperation = typename HalPolicy::Operation,
845 typename HalModel = typename HalPolicy::Model>
846ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
847 uint32_t inputIndex,
848 const HalModel& model,
849 const ConversionData& data,
850 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
851 const armnn::TensorShape* overrideTensorShape = nullptr,
852 bool optional = false)
853{
854 using HalOperand = typename HalPolicy::Operand;
855
856 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
857 if (!operand)
858 {
859 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
860 return ConstTensorPin();
861 }
862 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
863 model,
864 data,
865 dimensionMappings,
866 overrideTensorShape,
867 optional);
868}
869
870template<typename HalPolicy,
871 typename OutputType,
872 typename HalOperandType = typename HalPolicy::OperandType,
873 typename HalOperation = typename HalPolicy::Operation,
874 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100875bool GetInputScalar(const HalOperation& operation,
876 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100877 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100878 OutputType& outValue,
879 const HalModel& model,
880 const ConversionData& data)
881{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100882 using HalOperand = typename HalPolicy::Operand;
883
884 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100885 if (!operand)
886 {
887 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
888 }
889
890 if (operand->type != type)
891 {
892 return Fail("%s: unexpected operand type: %s (should be %s)",
893 __func__, toString(operand->type).c_str(), toString(type).c_str());
894 }
895
896 if (operand->location.length != sizeof(OutputType))
897 {
898 return Fail("%s: incorrect operand location length: %i (should be %i)",
899 __func__, operand->location.length, sizeof(OutputType));
900 }
901
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100902 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100903 if (!valueAddress)
904 {
905 return Fail("%s: failed to get address for operand", __func__);
906 }
907
908 outValue = *(static_cast<const OutputType*>(valueAddress));
909 return true;
910}
911
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100912template<typename HalPolicy,
913 typename HalOperation = typename HalPolicy::Operation,
914 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100915bool GetInputInt32(const HalOperation& operation,
916 uint32_t inputIndex,
917 int32_t& outValue,
918 const HalModel& model,
919 const ConversionData& data)
920{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100921 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100922}
923
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100924template<typename HalPolicy,
925 typename HalOperation = typename HalPolicy::Operation,
926 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100927bool GetInputFloat32(const HalOperation& operation,
928 uint32_t inputIndex,
929 float& outValue,
930 const HalModel& model,
931 const ConversionData& data)
932{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100934}
935
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100936template<typename HalPolicy,
937 typename HalOperation = typename HalPolicy::Operation,
938 typename HalOperandType = typename HalPolicy::OperandType,
939 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100940bool GetInputActivationFunctionImpl(const HalOperation& operation,
941 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100942 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100943 ActivationFn& outActivationFunction,
944 const HalModel& model,
945 const ConversionData& data)
946{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100947 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100948 {
949 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
950 __func__,
951 toString(type).c_str(),
952 toString(OperandType::INT32).c_str(),
953 toString(OperandType::TENSOR_INT32).c_str());
954 }
955
956 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100957 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100958 {
959 return Fail("%s: failed to get activation input value", __func__);
960 }
961 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
962 return true;
963}
964
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100965template<typename HalPolicy,
966 typename HalOperation = typename HalPolicy::Operation,
967 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100968bool GetInputActivationFunction(const HalOperation& operation,
969 uint32_t inputIndex,
970 ActivationFn& outActivationFunction,
971 const HalModel& model,
972 const ConversionData& data)
973{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100974 return GetInputActivationFunctionImpl<HalPolicy>(operation,
975 inputIndex,
976 HalPolicy::OperandType::INT32,
977 outActivationFunction,
978 model,
979 data);
arovir01b0717b52018-09-05 17:03:25 +0100980}
981
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the comment above, the type passed to the Impl is the
    // scalar OperandType::INT32 — making this function behave identically to
    // GetInputActivationFunction. Accepting a genuine 1-element tensor would need
    // OperandType::TENSOR_INT32 here; confirm the intended behavior against callers.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
999
1000
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001001template<typename HalPolicy,
1002 typename HalOperation = typename HalPolicy::Operation,
1003 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001004bool GetOptionalInputActivation(const HalOperation& operation,
1005 uint32_t inputIndex,
1006 ActivationFn& activationFunction,
1007 const HalModel& model,
1008 const ConversionData& data)
1009{
1010 if (operation.inputs.size() <= inputIndex)
1011 {
1012 activationFunction = ActivationFn::kActivationNone;
1013 }
1014 else
1015 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001016 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001017 {
1018 return Fail("%s: Operation has invalid inputs", __func__);
1019 }
1020 }
1021 return true;
1022}
1023
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001024template<typename HalPolicy,
1025 typename ConvolutionDescriptor,
1026 typename HalOperation = typename HalPolicy::Operation,
1027 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001028bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1029 uint32_t dilationXIndex,
1030 ConvolutionDescriptor& descriptor,
1031 const HalModel& model,
1032 const ConversionData& data)
1033{
1034 bool success = true;
1035 if (operation.inputs.size() >= dilationXIndex + 2)
1036 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001037 success &= GetInputScalar<HalPolicy>(operation,
1038 dilationXIndex,
1039 HalPolicy::OperandType::INT32,
1040 descriptor.m_DilationX,
1041 model,
1042 data);
1043 success &= GetInputScalar<HalPolicy>(operation,
1044 dilationXIndex + 1,
1045 HalPolicy::OperandType::INT32,
1046 descriptor.m_DilationY,
1047 model,
1048 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001049 }
1050
1051 return success;
1052}
1053
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001054template<typename HalPolicy,
1055 typename HalOperand = typename HalPolicy::Operand,
1056 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001057bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001058 std::vector<int32_t>& outValues,
1059 const HalModel& model,
1060 const ConversionData& data)
1061{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001062 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001063 {
1064 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1065 }
1066
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001067 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001068 if (!startAddress)
1069 {
1070 return Fail("%s: failed to get operand address", __func__, operand.type);
1071 }
1072
1073 // Check number of bytes is sensible
1074 const uint32_t numBytes = operand.location.length;
1075 if (numBytes % sizeof(int32_t) != 0)
1076 {
1077 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1078 __func__, numBytes, sizeof(int32_t));
1079 }
1080
1081 outValues.resize(numBytes / sizeof(int32_t));
1082 memcpy(outValues.data(), startAddress, numBytes);
1083 return true;
1084}
1085
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001086template<typename HalPolicy,
1087 typename HalOperation = typename HalPolicy::Operation,
1088 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001089bool GetInputPaddingScheme(const HalOperation& operation,
1090 uint32_t inputIndex,
1091 PaddingScheme& outPaddingScheme,
1092 const HalModel& model,
1093 const ConversionData& data)
1094{
1095 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001096 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001097 {
1098 return Fail("%s: failed to get padding scheme input value", __func__);
1099 }
1100
1101 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1102 return true;
1103}
1104
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    // Resolve the operation input at 'inputIndex' to an ArmNN output slot, creating a
    // Constant layer on demand for constant operands. Returns an invalid handle on failure.
    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend before a layer is added.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        // GetTensorInfoForOperand throws for HAL operand types ArmNN cannot represent.
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1217
Kevin May42477c12020-03-26 13:34:14 +00001218
#ifdef ARMNN_ANDROID_NN_V1_3
// NN HAL 1.3 overload of ConvertToLayerInputHandle: the same logic as the generic
// template, but HAL 1.3 renames the lifetimes MODEL_INPUT/MODEL_OUTPUT to
// SUBGRAPH_INPUT/SUBGRAPH_OUTPUT, so it needs its own switch over operand->lifetime.
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend before a layer is added.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        // GetTensorInfoForOperand throws for HAL operand types ArmNN cannot represent.
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1331
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001332template<typename HalPolicy,
1333 typename HalOperation = typename HalPolicy::Operation,
1334 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001335bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1336 uint32_t operationOutputIndex,
1337 armnn::IConnectableLayer& layer,
1338 uint32_t layerOutputIndex,
1339 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001340 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001341{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001342 using HalOperand = typename HalPolicy::Operand;
1343
1344 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001345 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1346 {
1347 return false;
1348 }
1349
1350 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1351
1352 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1353 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1354
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001355 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001356
1357 return true;
1358}
1359
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001360template<typename HalPolicy,
1361 typename HalOperation = typename HalPolicy::Operation,
1362 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001363armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1364 uint32_t inputIndex,
1365 const HalModel& model,
1366 ConversionData& data)
1367{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001368 using HalOperand = typename HalPolicy::Operand;
1369
1370 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001371 if (!operand)
1372 {
1373 return armnn::DataLayout::NHWC;
1374 }
1375
1376 if (!IsBool(*operand))
1377 {
1378 return armnn::DataLayout::NHWC;
1379 }
1380
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001381 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001382 if (!valueAddress)
1383 {
1384 return armnn::DataLayout::NHWC;
1385 }
1386
1387 if (*(static_cast<const bool*>(valueAddress)))
1388 {
1389 return armnn::DataLayout::NCHW;
1390 }
1391 else
1392 {
1393 return armnn::DataLayout::NHWC;
1394 }
1395}
1396
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001397template<typename HalPolicy,
1398 typename HalOperation = typename HalPolicy::Operation,
1399 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001400bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1401 uint32_t outputIndex,
1402 armnn::IConnectableLayer& layer,
1403 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001404 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001405{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001406 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1407 outputIndex,
1408 layer,
1409 outputIndex,
1410 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001411 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001412}
1413
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001414template<typename HalPolicy,
1415 typename HalOperation = typename HalPolicy::Operation,
1416 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001417bool ConvertToActivation(const HalOperation& operation,
1418 const char* operationName,
1419 const armnn::ActivationDescriptor& activationDesc,
1420 const HalModel& model,
1421 ConversionData& data)
1422{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001423 using HalOperand = typename HalPolicy::Operand;
1424
1425 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001426 if (!input.IsValid())
1427 {
1428 return Fail("%s: Input 0 is invalid", operationName);
1429 }
1430
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001431 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001432 if (!outputOperand)
1433 {
1434 return false;
1435 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001436
1437 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001438 if (IsDynamicTensor(outInfo))
1439 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001440 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001441 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001442
1443 bool isSupported = false;
1444 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1445 IsActivationSupported,
1446 data.m_Backends,
1447 isSupported,
1448 input.GetTensorInfo(),
1449 outInfo,
1450 activationDesc);
1451 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001452 {
1453 return false;
1454 }
1455
1456 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01001457 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001458 input.Connect(layer->GetInputSlot(0));
1459
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001460 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001461}
1462
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001463template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001464 typename HalOperation = typename HalPolicy::Operation,
1465 typename HalModel = typename HalPolicy::Model>
1466bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1467{
1468 armnn::ActivationDescriptor desc;
1469 desc.m_Function = armnn::ActivationFunction::ReLu;
1470
1471 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1472}
1473
1474template<typename HalPolicy,
1475 typename HalOperation = typename HalPolicy::Operation,
1476 typename HalModel = typename HalPolicy::Model>
1477bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1478{
1479 armnn::ActivationDescriptor desc;
1480 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1481 desc.m_A = 1.0f;
1482 desc.m_B = -1.0f;
1483
1484 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1485}
1486
1487template<typename HalPolicy,
1488 typename HalOperation = typename HalPolicy::Operation,
1489 typename HalModel = typename HalPolicy::Model>
1490bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1491{
1492 armnn::ActivationDescriptor desc;
1493 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1494 desc.m_A = 6.0f;
1495
1496 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1497}
1498
1499template<typename HalPolicy,
1500 typename HalOperation = typename HalPolicy::Operation,
1501 typename HalModel = typename HalPolicy::Model>
1502bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1503{
1504 armnn::ActivationDescriptor desc;
1505 desc.m_Function = armnn::ActivationFunction::TanH;
1506 desc.m_A = 1.0f; // android nn does not support tanH parameters
1507 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1508
1509 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1510}
1511
1512template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001513 typename HalOperation = typename HalPolicy::Operation,
1514 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001515bool ConvertPaddings(const HalOperation& operation,
1516 const HalModel& model,
1517 ConversionData& data,
1518 unsigned int rank,
1519 armnn::PadDescriptor& padDescriptor)
1520{
1521 using HalOperand = typename HalPolicy::Operand;
1522
1523 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1524 if (!paddingsOperand)
1525 {
1526 return Fail("%s: Could not read paddings operand", __func__);
1527 }
1528
1529 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1530 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1531 {
1532 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1533 }
1534
1535 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001536 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1537 {
1538 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1539 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001540
1541 // add padding for each dimension of input tensor.
1542 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1543 {
1544 int paddingBeforeInput = paddings[i];
1545 int paddingAfterInput = paddings[i + 1];
1546
1547 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1548 {
1549 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1550 }
1551
1552 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1553 }
1554
1555 return true;
1556}
1557
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN 2-D pooling operation (max/average/L2, chosen by
/// poolType) into an Arm NN Pooling2d layer, followed by any fused activation.
/// Supports both the explicit-padding signature (>= 10 inputs) and the
/// implicit-padding-scheme signature (7 inputs), each with an optional
/// trailing data-layout operand on HAL 1.2+.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected here.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC; // default; may be overridden below

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ may append an optional data-layout operand at index 10.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ may append an optional data-layout operand at index 7.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // The data layout must be resolved before indexing H/W, so padding is
        // computed only after the optional layout operand has been read.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        // Derive explicit padding from the implicit scheme (SAME/VALID).
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Ask each candidate backend whether it can execute this pooling config.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (may return the pooling layer itself when
    // the activation is "none" — behavior of ProcessActivation, defined elsewhere).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1676
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001677template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001678 typename HalOperation = typename HalPolicy::Operation,
1679 typename HalModel = typename HalPolicy::Model>
1680bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001681{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001682 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001683
1684 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1685 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1686
1687 if (!input0.IsValid() || !input1.IsValid())
1688 {
1689 return Fail("%s: Operation has invalid inputs", __func__);
1690 }
1691
1692 // The FuseActivation parameter is always the input index 2
1693 // and it should be optional
1694 ActivationFn activationFunction;
1695 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1696 {
1697 return Fail("%s: Operation has invalid inputs", __func__);
1698 }
1699
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001700 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001701 if (!outputOperand)
1702 {
1703 return false;
1704 }
1705
1706 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1707 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1708
1709 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1710 if (IsDynamicTensor(outputInfo))
1711 {
1712 return Fail("%s: Dynamic output tensors are not supported", __func__);
1713 }
1714
1715 bool isSupported = false;
1716 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1717 IsAdditionSupported,
1718 data.m_Backends,
1719 isSupported,
1720 inputInfo0,
1721 inputInfo1,
1722 outputInfo);
1723 if (!isSupported)
1724 {
1725 return false;
1726 }
1727
1728 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1729 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1730
1731 if (endLayer != nullptr)
1732 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001733 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001734 if (!isReshapeSupported)
1735 {
1736 return false;
1737 }
1738
Mike Kelly46272802019-08-14 17:00:48 +01001739 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1740 }
1741 else
1742 {
1743 return Fail("%s: ProcessActivation failed", __func__);
1744 }
1745}
1746
1747template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001748 typename HalOperation = typename HalPolicy::Operation,
1749 typename HalModel = typename HalPolicy::Model>
1750bool ConvertArgMinMax(const HalOperation& operation,
1751 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001752 ConversionData& data,
1753 armnn::ArgMinMaxFunction argMinMaxFunction)
1754{
1755 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1756
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001757 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001758 using HalOperandType = typename HalPolicy::OperandType;
1759
1760 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1761
1762 if (!input0.IsValid())
1763 {
1764 return Fail("%s: Operation has invalid inputs", __func__);
1765 }
1766
1767 int32_t axis;
1768 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1769 {
1770 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1771 }
1772
1773 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1774 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1775
1776 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1777 {
1778 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1779 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1780 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1781 return Fail("%s: Axis must be in range [-n, n)", __func__);
1782 }
1783
1784 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1785 if (!output)
1786 {
1787 return Fail("%s: Could not read output 0", __func__);
1788 }
1789
1790 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1791
1792 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1793 if (IsDynamicTensor(outputInfo))
1794 {
1795 return Fail("%s: Dynamic output tensors are not supported", __func__);
1796 }
1797
1798 armnn::ArgMinMaxDescriptor descriptor;
1799 descriptor.m_Function = argMinMaxFunction;
1800 descriptor.m_Axis = axis;
1801
1802 bool isSupported = false;
1803 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1804 IsArgMinMaxSupported,
1805 data.m_Backends,
1806 isSupported,
1807 inputInfo0,
1808 outputInfo,
1809 descriptor);
1810 if (!isSupported)
1811 {
1812 return false;
1813 }
1814
1815 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1816 assert(layer != nullptr);
1817
1818 input0.Connect(layer->GetInputSlot(0));
1819
1820 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1821}
1822
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN CONCATENATION operation into an Arm NN Concat layer.
/// Inputs 0..N-1 are the tensors to join; input N is the INT32 concat axis.
/// Inputs of rank 1/2 are first reshaped up to rank 3, and when the axis is
/// one Arm NN cannot concat on directly, the inputs are transposed, joined,
/// then transposed back. Any reshape is likewise undone at the end.
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Tracks whether any input had to be reshaped up to rank 3, and by how
    // many leading dimensions, so the output can be shaped/unshaped to match.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        // Rank 1/2 inputs are padded up to rank 3 with leading 1-dimensions.
        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // The helper reshape itself must be supported by a backend.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect non-owning TensorInfo pointers for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // The inverse permutation restores the caller-visible dimension order.
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
                                                                     layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2113
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN CONV_2D operation into an Arm NN Convolution2d
/// layer with a fused activation. Supports the explicit-padding signature
/// (10 inputs) and the implicit-padding-scheme signature (7 inputs).
/// Weights and bias must be constant tensors.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Rescale the bias quantization so it matches input_scale * weights_scale,
    // as required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: pad l/r/t/b at inputs 3-6, strides at 7-8, activation at 9.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding: scheme at input 3, strides at 4-5, activation at 6.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel H/W taken from weight dims [1]/[2], input H/W from dims [1]/[2]
        // — consistent with the NHWC input and OHWI weight ordering used here.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check backend support before adding the layer to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2233
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002234template<typename HalPolicy,
2235 typename HalOperation = typename HalPolicy::Operation,
2236 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002237bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2238{
2239 using HalOperand = typename HalPolicy::Operand;
2240 using HalOperandType = typename HalPolicy::OperandType;
2241
2242 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2243 if (!input.IsValid() )
2244 {
2245 return Fail("%s: Operation has invalid inputs", __func__);
2246 }
2247
2248 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2249 unsigned int rank = inputInfo.GetNumDimensions();
2250 if (rank != 4)
2251 {
2252 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2253 }
2254
2255 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2256 if (!output)
2257 {
2258 return Fail("%s: Could not read output 0", __func__);
2259 }
2260
2261 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2262 if (IsDynamicTensor(outputInfo))
2263 {
2264 return Fail("%s: Dynamic output tensors are not supported", __func__);
2265 }
2266
2267 armnn::DepthToSpaceDescriptor descriptor;
2268
2269 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2270 if (descriptor.m_BlockSize <= 1)
2271 {
2272 return Fail("%s: Block size must be at least 1 in all dimensions");
2273 }
2274
2275 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002276 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002277 {
2278 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2279 }
2280
2281 bool isSupported = false;
2282 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2283 IsDepthToSpaceSupported,
2284 data.m_Backends,
2285 isSupported,
2286 inputInfo,
2287 outputInfo,
2288 descriptor);
2289 if (!isSupported)
2290 {
2291 return false;
2292 }
2293
2294 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2295 assert(layer != nullptr);
2296 input.Connect(layer->GetInputSlot(0));
2297
2298 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2299}
2300
// Converts an Android NN DEPTHWISE_CONV_2D operation to an ArmNN
// DepthwiseConvolution2d layer.
//
// Supports both HAL signatures:
//   - 11 inputs: explicit padding (left/right/top/bottom), strides, activation
//   - 8 inputs:  implicit padding scheme, strides, activation
// Weights and bias must be constant. Returns false (via Fail) on any
// invalid or unsupported input.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I = input channels from the input tensor; M = depth multiplier derived
    // from the AndroidNN packed I*M channel dimension.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale,
    // as required by quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding signature: pads, strides and fused activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding signature: padding scheme, strides and fused activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [ M, I, H, W ] after the swizzle above, so the
        // kernel extents are dimensions 3 (W) and 2 (H).
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2445
Mike Kelly3c673942019-07-25 09:26:06 +01002446template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002447 typename HalOperation = typename HalPolicy::Operation,
2448 typename HalModel = typename HalPolicy::Model>
2449bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002450{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002451 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002452
2453 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2454 if (!input.IsValid())
2455 {
2456 return Fail("%s: Operation has invalid input", __func__);
2457 }
2458
Sadik Armagan98c0f662019-11-21 15:54:36 +00002459 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2460 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2461 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2462 {
2463 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2464 }
2465
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002466 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002467 if (!outputOperand)
2468 {
2469 return Fail("%s: Operation has invalid outputs", __func__);
2470 }
2471
2472 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2473 if (IsDynamicTensor(outputInfo))
2474 {
2475 return Fail("%s: Dynamic output tensors are not supported", __func__);
2476 }
2477
2478 bool isSupported = false;
2479 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2480 IsDequantizeSupported,
2481 data.m_Backends,
2482 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002483 inputInfo,
2484 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002485 if (!isSupported)
2486 {
2487 return false;
2488 }
2489
2490 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2491 assert(layer != nullptr);
2492 input.Connect(layer->GetInputSlot(0));
2493
2494 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2495}
2496
2497template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002498 typename HalOperation = typename HalPolicy::Operation,
2499 typename HalModel = typename HalPolicy::Model>
2500bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002501{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002502 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002503
2504 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2505 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2506
2507 if (!input0.IsValid() || !input1.IsValid())
2508 {
2509 return Fail("%s: Operation has invalid inputs", __func__);
2510 }
2511
2512 // The FuseActivation parameter is always the input index 2
2513 // and it should be optional
2514 ActivationFn activationFunction;
2515 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2516 {
2517 return Fail("%s: Operation has invalid inputs", __func__);
2518 }
2519
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002520 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002521 if (!output)
2522 {
2523 return Fail("%s: Could not read output 0", __func__);
2524 }
2525
2526 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2527 if (IsDynamicTensor(outputInfo))
2528 {
2529 return Fail("%s: Dynamic output tensors are not supported", __func__);
2530 }
2531
2532 bool isSupported = false;
2533 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2534 IsDivisionSupported,
2535 data.m_Backends,
2536 isSupported,
2537 input0.GetTensorInfo(),
2538 input1.GetTensorInfo(),
2539 outputInfo);
2540 if (!isSupported)
2541 {
2542 return false;
2543 }
2544
2545 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2546 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2547
2548 if (endLayer)
2549 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002550 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002551 if (!isReshapeSupported)
2552 {
2553 return false;
2554 }
2555
Mike Kelly46272802019-08-14 17:00:48 +01002556 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2557 }
2558 return Fail("%s: ProcessActivation failed", __func__);
2559}
2560
2561template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002562 typename HalOperation = typename HalPolicy::Operation,
2563 typename HalModel = typename HalPolicy::Model>
2564bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002565{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002566 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002567
2568 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2569 if (!input.IsValid())
2570 {
2571 return Fail("%s: Operation has invalid inputs", __func__);
2572 }
2573
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002574 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002575 if (!outputOperand)
2576 {
2577 return Fail("%s: Operation has invalid outputs", __func__);
2578 }
2579
2580 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2581 if (IsDynamicTensor(outputInfo))
2582 {
2583 return Fail("%s: Dynamic output tensors are not supported", __func__);
2584 }
2585
2586 bool isSupported = false;
2587 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2588 IsFloorSupported,
2589 data.m_Backends,
2590 isSupported,
2591 input.GetTensorInfo(),
2592 outputInfo);
2593 if (!isSupported)
2594 {
2595 return false;
2596 }
2597
2598 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2599 assert(layer != nullptr);
2600 input.Connect(layer->GetInputSlot(0));
2601
2602 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2603}
2604
// Returns true if the operand is a symmetric, per-tensor quantized int8
// (TENSOR_QUANT8_SYMM) tensor. The HAL 1.0 operand model predates that
// type, so this overload always returns false.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// HAL 1.2 overload: TENSOR_QUANT8_SYMM was introduced with NNAPI 1.2.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// HAL 1.3 overload: same check against the 1.3 operand type enum.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2627
// Outcome of DequantizeIfRequired():
//  - SUCCESS:         the weights were fed by a DEQUANTIZE op and have been
//                     manually dequantized into a newly allocated float buffer.
//  - NOT_REQUIRED:    the weights are already constant (or no dequantization
//                     path was found); read them from the model as-is.
//  - INVALID_OPERAND: the weights operand could not be read.
enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

// (dequantized buffer, buffer size in bytes, Float32 tensor info, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2636
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002637template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002638 typename HalOperation = typename HalPolicy::Operation,
2639 typename HalModel = typename HalPolicy::Model>
2640DequantizeResult DequantizeIfRequired(size_t operand_index,
2641 const HalOperation& operation,
2642 const HalModel& model,
2643 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002644{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002645 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002646
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002647 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002648 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002649 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002650 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002651 }
2652
2653 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2654 {
2655 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002656 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002657 }
2658
2659 const size_t weightsInputIndex = operation.inputs[operand_index];
2660
2661 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2662 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002663 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002664 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002665 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002666 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002667 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2668 {
2669 continue;
2670 }
2671
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002672 size_t outOpIndex = weightsInputIndex + 1;
2673 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002674 {
2675 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002676 }
2677
2678 if (outOpIndex != weightsInputIndex)
2679 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002680 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002681 }
2682
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002683 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002684 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002685
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002686 if (!IsQSymm8(*operand))
2687 {
2688 // Only supporting dequantize from QSYMM8 to FLOAT
2689 break;
2690 }
2691
2692 // Allocate a new buffer for the dequantized data and manually dequantize
2693 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2694 if (!startValue)
2695 {
2696 // Failed to get the operand address
2697 break;
2698 }
2699
2700 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2701 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002702 const float quantizationScale = operand->scale;
2703
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002704 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2705 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2706 {
2707 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002708 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002709 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2710 }
2711
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002712 // Construct tensor info for dequantized ConstTensor
2713 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2714 operand->dimensions.data(),
2715 armnn::DataType::Float32);
2716
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002717 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2718 std::move(tensorInfo),
2719 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002720 }
2721
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002722 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002723}
2724
2725template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002726 typename HalOperation = typename HalPolicy::Operation,
2727 typename HalModel = typename HalPolicy::Model>
2728ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2729 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002730 const ConversionData& data,
2731 size_t operandIndex,
2732 bool optional = false)
2733{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002734 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2735
2736 DequantizeStatus status = std::get<3>(dequantized);
2737 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002738 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002739 case DequantizeStatus::INVALID_OPERAND:
2740 {
2741 // return invalid const tensor pin
2742 return ConstTensorPin();
2743 }
2744 case DequantizeStatus::NOT_REQUIRED:
2745 {
2746 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2747 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2748 }
2749 case DequantizeStatus::SUCCESS:
2750 default:
2751 {
2752 return ConstTensorPin(
2753 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2754 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002755 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002756}
2757
2758
// Converts an Android NN FULLY_CONNECTED operation to an ArmNN FullyConnected
// layer. Weights (input 1) may be the output of a DEQUANTIZE op and are
// resolved via DequantizeAndMakeConstTensorPin; bias (input 2) must be a
// constant 1D tensor. Inputs with rank > 2 are flattened through an inserted
// Reshape layer before the FullyConnected layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may need manual dequantization (QSYMM8 -> float); bias is 1D.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to 2D [batch, inputSize] as FullyConnected expects.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Cross-check that (flattened input) x (weights) produces the declared output shape.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape layer to flatten the input before the FullyConnected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2877
2878template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002879 typename HalOperation = typename HalPolicy::Operation,
2880 typename HalModel = typename HalPolicy::Model>
2881bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002882{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002883 using HalOperand = typename HalPolicy::Operand;
2884
Mike Kelly999e2092019-08-15 10:46:46 +01002885 if (operation.inputs.size() != 1)
2886 {
2887 return Fail("%s: Optional inputs are not supported", __func__);
2888 }
2889
Mike Kelly46272802019-08-14 17:00:48 +01002890 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2891 if (!input.IsValid())
2892 {
2893 return Fail("%s: Operation has invalid inputs", __func__);
2894 }
2895
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002896 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002897 if (!output)
2898 {
2899 return Fail("%s: Could not read output 0", __func__);
2900 }
2901
2902 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2903 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2904
2905 if (IsDynamicTensor(outputInfo))
2906 {
2907 return Fail("%s: Dynamic output tensors are not supported", __func__);
2908 }
2909 if (outputInfo.GetNumDimensions() != 4u)
2910 {
2911 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2912 }
2913
2914 armnn::L2NormalizationDescriptor desc;
2915 desc.m_DataLayout = armnn::DataLayout::NHWC;
2916
2917 bool isSupported = false;
2918 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2919 IsL2NormalizationSupported,
2920 data.m_Backends,
2921 isSupported,
2922 inputInfo,
2923 outputInfo,
2924 desc);
2925 if (!isSupported)
2926 {
2927 return false;
2928 }
2929
2930 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2931 assert(layer != nullptr);
2932 input.Connect(layer->GetInputSlot(0));
2933
2934 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2935}
2936
// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation to an ArmNN
// Normalization layer (across-channel, local-brightness method).
// Expects exactly 5 inputs: tensor, radius (INT32), bias k, alpha, beta;
// the optional-axis form is rejected. Output must be rank 4.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius (into m_NormSize, converted below), k, alpha and beta.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3014
3015template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003016 typename HalOperation = typename HalPolicy::Operation,
3017 typename HalModel = typename HalPolicy::Model>
3018bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003019{
Mike Kelly46272802019-08-14 17:00:48 +01003020 armnn::ActivationDescriptor desc;
3021 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3022
3023 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3024}
3025
3026template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003027 typename HalOperation = typename HalPolicy::Operation,
3028 typename HalModel = typename HalPolicy::Model>
3029bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003030{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003031 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003032
3033 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3034 if (!input.IsValid())
3035 {
3036 return Fail("%s: Operation has invalid inputs", __func__);
3037 }
3038
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003039 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003040 if (!output)
3041 {
3042 return Fail("%s: Could not read output 0", __func__);
3043 }
3044
3045 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3046 if (IsDynamicTensor(outputInfo))
3047 {
3048 return Fail("%s: Dynamic output tensors are not supported", __func__);
3049 }
3050
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003051 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003052 if (!axisOperand)
3053 {
3054 return Fail("%s: Could not read input 1", __func__);
3055 }
3056
3057 std::vector<int32_t> axis;
3058 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3059 {
3060 return Fail("%s: Input 1 has invalid values", __func__);
3061 }
3062
3063 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3064
3065 // Convert the axis to unsigned int and remove duplicates.
3066 unsigned int rank = inputInfo.GetNumDimensions();
3067 std::set<unsigned int> uniqueAxis;
3068 std::transform(axis.begin(), axis.end(),
3069 std::inserter(uniqueAxis, uniqueAxis.begin()),
3070 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3071
3072 // Get the "keep dims" flag.
3073 int32_t keepDims = 0;
3074 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3075 {
3076 return Fail("%s: Could not read input 2", __func__);
3077 }
3078
3079 armnn::MeanDescriptor descriptor;
3080 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3081 descriptor.m_KeepDims = keepDims > 0;
3082
3083 bool isSupported = false;
3084 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3085 IsMeanSupported,
3086 data.m_Backends,
3087 isSupported,
3088 inputInfo,
3089 outputInfo,
3090 descriptor);
3091 if (!isSupported)
3092 {
3093 return false;
3094 }
3095
3096 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3097 assert(layer != nullptr);
3098 input.Connect(layer->GetInputSlot(0));
3099
3100 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3101}
3102
3103template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003104 typename HalOperation = typename HalPolicy::Operation,
3105 typename HalModel = typename HalPolicy::Model>
3106bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003107{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003108 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003109
3110 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3111 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3112
3113 if (!input0.IsValid() || !input1.IsValid())
3114 {
3115 return Fail("%s: Operation has invalid inputs", __func__);
3116 }
3117
3118 // The FuseActivation parameter is always the input index 2
3119 // and it should be optional
3120 ActivationFn activationFunction;
3121 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3122 {
3123 return Fail("%s: Operation has invalid inputs", __func__);
3124 }
3125
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003126 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003127
3128 if (outputOperand == nullptr)
3129 {
3130 return false;
3131 }
3132
3133 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3134 if (IsDynamicTensor(outputInfo))
3135 {
3136 return Fail("%s: Dynamic output tensors are not supported", __func__);
3137 }
3138
3139 bool isSupported = false;
3140 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3141 IsMultiplicationSupported,
3142 data.m_Backends,
3143 isSupported,
3144 input0.GetTensorInfo(),
3145 input1.GetTensorInfo(),
3146 outputInfo);
3147 if (!isSupported)
3148 {
3149 return false;
3150 }
3151
3152 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3153 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3154
3155 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3156 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3157
3158 if (endLayer != nullptr)
3159 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003160 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003161 if (!isReshapeSupported)
3162 {
3163 return false;
3164 }
3165
Mike Kelly46272802019-08-14 17:00:48 +01003166 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3167 }
3168 else
3169 {
3170 return Fail("%s: ProcessActivation failed", __func__);
3171 }
3172}
3173
3174template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003175 typename HalOperation = typename HalPolicy::Operation,
3176 typename HalModel = typename HalPolicy::Model>
3177bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003178{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003179 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003180
Mike Kelly3c673942019-07-25 09:26:06 +01003181 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3182 if (!input.IsValid())
3183 {
3184 return Fail("%s: Operation has invalid inputs", __func__);
3185 }
3186
3187 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3188 unsigned int rank = inputInfo.GetNumDimensions();
3189
3190 armnn::PadDescriptor descriptor;
3191 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3192 {
3193 return Fail("%s: Could not convert paddings", __func__);
3194 }
3195
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003196 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3197 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003198 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3199 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3200 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003201 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003202 {
3203 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3204 }
3205
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003206 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003207 if (!output)
3208 {
3209 return Fail("%s: Could not read output", __func__);
3210 }
3211
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003212 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003213 if (IsDynamicTensor(outputInfo))
3214 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003215 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003216 }
3217
3218 bool isSupported = false;
3219 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3220 IsPadSupported,
3221 data.m_Backends,
3222 isSupported,
3223 inputInfo,
3224 outputInfo,
3225 descriptor);
3226 if (!isSupported)
3227 {
3228 return false;
3229 }
3230
3231 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3232 assert(layer != nullptr);
3233 input.Connect(layer->GetInputSlot(0));
3234 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3235
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003236 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003237}
3238
Mike Kelly0a879362019-07-29 16:56:31 +01003239template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003240 typename HalOperation = typename HalPolicy::Operation,
3241 typename HalModel = typename HalPolicy::Model>
3242bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003243{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003244 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003245
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003246 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3247 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3248 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003249
3250 if (inputOperand == nullptr
3251 || requestedShapeOperand == nullptr
3252 || outputOperand == nullptr)
3253 {
3254 return Fail("%s: Operation has invalid inputs", __func__);
3255 }
3256
3257 if (requestedShapeOperand->dimensions.size() != 1)
3258 {
3259 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3260 __func__, requestedShapeOperand->dimensions.size());
3261 }
3262
3263 std::vector<int32_t> targetDimensions;
3264 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3265 {
3266 return Fail("%s: Could not read values of input 1", __func__);
3267 }
3268
3269 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3270
3271 Shape requestedShape;
3272 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3273 // function that resolves these values into a fully specified tensor shape.
3274 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3275 {
3276 return Fail("%s: Failed to resolve the requested shape", __func__);
3277 }
3278
3279 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3280 if (!SameShape(requestedShape, outputOperandShape))
3281 {
3282 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3283 }
3284
3285 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3286 if (!input.IsValid())
3287 {
3288 return Fail("%s: Could not read input 0", __func__);
3289 }
3290
3291 armnn::ReshapeDescriptor reshapeDescriptor;
3292 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3293 requestedShape.dimensions.data());
3294
3295 bool isSupported = false;
3296 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3297 IsReshapeSupported,
3298 data.m_Backends,
3299 isSupported,
3300 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003301 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003302 reshapeDescriptor);
3303 if (!isSupported)
3304 {
3305 return false;
3306 }
3307
3308 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3309 assert(layer != nullptr);
3310 input.Connect(layer->GetInputSlot(0));
3311
3312 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3313}
3314
3315template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003316 typename HalOperation = typename HalPolicy::Operation,
3317 typename HalModel = typename HalPolicy::Model>
3318bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003319{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003320 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003321
Mike Kelly0a879362019-07-29 16:56:31 +01003322 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3323 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3324
3325 if (!input0.IsValid() || !input1.IsValid())
3326 {
3327 return Fail("%s: Operation has invalid inputs", __func__);
3328 }
3329
3330 // The FuseActivation parameter is always the input index 2
3331 // and it should be optional
3332 ActivationFn activationFunction;
3333 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3334 {
3335 return Fail("%s: Operation has invalid inputs", __func__);
3336 }
3337
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003338 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003339 if (!output)
3340 {
3341 return Fail("%s: Could not read output 0", __func__);
3342 }
3343
3344 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3345 if (IsDynamicTensor(outputInfo))
3346 {
3347 return Fail("%s: Dynamic output tensors are not supported", __func__);
3348 }
3349
3350 bool isSupported = false;
3351 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3352 IsSubtractionSupported,
3353 data.m_Backends,
3354 isSupported,
3355 input0.GetTensorInfo(),
3356 input1.GetTensorInfo(),
3357 outputInfo);
3358 if (!isSupported)
3359 {
3360 return false;
3361 }
3362
3363 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3364 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3365
3366 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3367 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3368
3369 if (endLayer)
3370 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003371 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003372 if (!isReshapeSupported)
3373 {
3374 return false;
3375 }
Mike Kelly0a879362019-07-29 16:56:31 +01003376 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3377 }
3378
3379 return Fail("%s: ProcessActivation failed", __func__);
3380}
3381
Finn Williams23b87b32019-07-30 11:44:05 +01003382template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003383 typename HalOperation = typename HalPolicy::Operation,
3384 typename HalModel = typename HalPolicy::Model>
3385bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003386{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003387 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003388
3389 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3390 if (!input.IsValid())
3391 {
3392 return Fail("%s: Operation has invalid inputs", __func__);
3393 }
3394
3395 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3396 unsigned int rank = inputInfo.GetNumDimensions();
3397 if (rank > 4)
3398 {
3399 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3400 }
3401
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003402 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003403 if (!output)
3404 {
3405 return Fail("%s: Could not read output 0", __func__);
3406 }
3407
3408 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3409 {
3410 return Fail("%s: Dynamic output tensors are not supported", __func__);
3411 }
3412
3413 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3414 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003415 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003416
3417 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3418
3419 std::vector<int32_t> axis;
3420 if (!axisOperand)
3421 {
3422 axis.assign(dimensionSequence,
3423 dimensionSequence + rank);
3424 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003425 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003426 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003427 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003428 }
3429
3430 std::vector<uint32_t> outputDims;
3431 for (unsigned int i = 0; i < rank; i++)
3432 {
3433 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3434 auto currentDimension = inputInfo.GetShape()[i];
3435 if (skipSqueeze || currentDimension != 1)
3436 {
3437 outputDims.push_back(currentDimension);
3438 }
3439 }
3440
3441 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3442
3443 armnn::TensorInfo outputInfo = inputInfo;
3444 outputInfo.SetShape(outShape);
3445
3446 armnn::ReshapeDescriptor reshapeDesc;
3447 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3448
3449 bool isSupported = false;
3450 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3451 IsReshapeSupported,
3452 data.m_Backends,
3453 isSupported,
3454 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003455 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003456 reshapeDesc);
3457 if (!isSupported)
3458 {
3459 return false;
3460 }
3461
3462 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3463 assert(layer != nullptr);
3464 input.Connect(layer->GetInputSlot(0));
3465
3466 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3467}
3468
3469template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003470 typename HalOperation = typename HalPolicy::Operation,
3471 typename HalModel = typename HalPolicy::Model>
3472bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003473{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003474 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003475
3476 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3477 if (!input.IsValid())
3478 {
3479 return Fail("%s: Operation has invalid inputs", __func__);
3480 }
3481
3482 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3483 unsigned int rank = inputInfo.GetNumDimensions();
3484 if (rank > 4)
3485 {
3486 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3487 }
3488
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003489 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003490 if (!output)
3491 {
3492 return Fail("%s: Could not read output 0", __func__);
3493 }
3494
3495 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3496 if (IsDynamicTensor(outputInfo))
3497 {
3498 return Fail("%s: Dynamic output tensors are not supported", __func__);
3499 }
3500
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003501 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3502 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3503 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003504
3505 std::vector<int32_t> beginValues;
3506 std::vector<int32_t> endValues;
3507 std::vector<int32_t> stridesValues;
3508
3509 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003510 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003511 {
3512 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3513 {
3514 return false;
3515 }
3516
3517 if (operandValues.size() != rank)
3518 {
3519 return false;
3520 }
3521
3522 return true;
3523 };
3524
3525 if (!ValidateInputOperands(*beginOperand, beginValues)
3526 || !ValidateInputOperands(*endOperand, endValues)
3527 || !ValidateInputOperands(*stridesOperand, stridesValues))
3528 {
3529 return Fail("%s: Operation has invalid input operand", __func__);
3530 }
3531
3532 // Stride cannot have value '0'
3533 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3534 {
3535 return Fail("%s: Stride must be non-zero value.", __func__);
3536 }
3537
3538 armnn::StridedSliceDescriptor descriptor;
3539 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3540 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3541 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3542 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3543
3544 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3545 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3546 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3547 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3548 {
3549 return Fail("%s: Operation has invalid inputs", __func__);
3550 }
3551
3552 bool isSupported = false;
3553 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3554 IsStridedSliceSupported,
3555 data.m_Backends,
3556 isSupported,
3557 inputInfo,
3558 outputInfo,
3559 descriptor);
3560 if (!isSupported)
3561 {
3562 return false;
3563 }
3564
3565 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3566 assert(layer != nullptr);
3567 input.Connect(layer->GetInputSlot(0));
3568
3569 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3570}
3571
3572template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003573 typename HalOperation = typename HalPolicy::Operation,
3574 typename HalModel = typename HalPolicy::Model>
3575bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003576{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003577 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003578
3579 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3580 if (!input.IsValid())
3581 {
3582 return Fail("%s: Operation has invalid inputs", __func__);
3583 }
3584
3585 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3586 unsigned int rank = inputInfo.GetNumDimensions();
3587 if (rank > 4)
3588 {
3589 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3590 }
3591
3592 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3593 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003594 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003595
3596 std::vector<int32_t> perm(rank);
3597 if (!permOperand)
3598 {
3599 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3600 for (unsigned int i = rank; i > 0; i--)
3601 {
3602 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3603 }
3604 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003605 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003606 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003607 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003608 }
3609
3610 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3611
Mike Kelly4a956582020-02-28 10:32:09 +00003612 armnn::TransposeDescriptor transposeDesc;
3613 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003614
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003615 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003616 if (!output)
3617 {
3618 return Fail("%s: Could not read output 0", __func__);
3619 }
3620
3621 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003622 if (IsDynamicTensor(outputInfo))
3623 {
3624 return Fail("%s: Dynamic output tensors are not supported", __func__);
3625 }
3626
Mike Kelly46272802019-08-14 17:00:48 +01003627
3628 bool isSupported = false;
3629 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003630 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003631 data.m_Backends,
3632 isSupported,
3633 inputInfo,
3634 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003635 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003636 if (!isSupported)
3637 {
3638 return false;
3639 }
3640
Mike Kelly4a956582020-02-28 10:32:09 +00003641 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003642 assert(layer != nullptr);
3643 input.Connect(layer->GetInputSlot(0));
3644
3645 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3646}
3647
3648template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003649 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003650 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003651 typename HalModel = typename HalPolicy::Model>
3652bool ConvertBatchToSpaceNd(const HalOperation& operation,
3653 const HalModel& model,
3654 ConversionData& data)
3655{
Finn Williams23b87b32019-07-30 11:44:05 +01003656
3657 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3658 if (!input.IsValid())
3659 {
3660 return Fail("%s: Operation has invalid inputs", __func__);
3661 }
3662
3663 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3664 if (!output)
3665 {
3666 return Fail("%s: Could not read output 0", __func__);
3667 }
3668
3669 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3670 if (IsDynamicTensor(outputInfo))
3671 {
3672 return Fail("%s: Dynamic output tensors are not supported", __func__);
3673 }
3674
3675 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3676 if (!blockOperand)
3677 {
3678 return Fail("%s: Could not read input 1", __func__);
3679 }
3680
3681 // Convert the block operand to int32
3682 std::vector<int32_t> block;
3683 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3684 {
3685 return Fail("%s: Input 1 has invalid values", __func__);
3686 }
3687
3688 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3689
3690 unsigned int rank = inputInfo.GetNumDimensions();
3691 if (rank != 4)
3692 {
3693 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3694 }
3695
3696 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3697 {
3698 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3699 " greater than or equal to 1", __func__);
3700 }
3701
3702 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3703 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3704 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3705
Kevin May42477c12020-03-26 13:34:14 +00003706 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01003707 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003708 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003709 }
3710 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3711 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3712
3713 bool isSupported = false;
3714 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3715 IsBatchToSpaceNdSupported,
3716 data.m_Backends,
3717 isSupported,
3718 inputInfo,
3719 outputInfo,
3720 batchToSpaceNdDesc);
3721 if (!isSupported)
3722 {
3723 return false;
3724 }
3725
3726 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3727 assert(layer != nullptr);
3728 input.Connect(layer->GetInputSlot(0));
3729
3730 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3731}
Mike Kelly0a879362019-07-29 16:56:31 +01003732
Finn Williamsd74c5052019-07-30 17:06:00 +01003733template<typename HalPolicy,
3734 typename HalOperation = typename HalPolicy::Operation,
3735 typename HalOperand = typename HalPolicy::Operand,
3736 typename HalModel = typename HalPolicy::Model>
3737bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3738{
3739 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3740 if (!input.IsValid())
3741 {
3742 return Fail("%s: Operation has invalid inputs", __func__);
3743 }
3744
3745 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3746 unsigned int rank = inputInfo.GetNumDimensions();
3747 unsigned int spatialDim = rank - 2;
3748
3749 if (rank != 4)
3750 {
3751 Fail("%s: Only inputs with rank 4 are supported", __func__);
3752 }
3753
3754 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3755 if (!output)
3756 {
3757 return Fail("%s: Could not read output 0", __func__);
3758 }
3759
3760 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3761 if (IsDynamicTensor(outputInfo))
3762 {
3763 return Fail("%s: Dynamic output tensors are not supported", __func__);
3764 }
3765
3766 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3767 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3768
3769 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3770 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3771 {
3772 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3773 }
3774
3775 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003776 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3777 {
3778 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3779 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003780 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3781 {
3782 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3783 }
3784
3785 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3786 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3787 {
3788 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3789 }
3790
3791 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3792 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003793 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3794 {
3795 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3796 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003797 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3798 {
3799 int paddingBeforeInput = paddings[i];
3800 int paddingAfterInput = paddings[i + 1];
3801 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3802 {
3803 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3804 }
3805
3806 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3807 }
3808
3809 armnn::SpaceToBatchNdDescriptor descriptor;
3810 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3811 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3812 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3813
Kevin May42477c12020-03-26 13:34:14 +00003814 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01003815 {
3816 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3817 }
3818
3819 bool isSupported = false;
3820 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3821 IsSpaceToBatchNdSupported,
3822 data.m_Backends,
3823 isSupported,
3824 inputInfo,
3825 outputInfo,
3826 descriptor);
3827 if (!isSupported)
3828 {
3829 return false;
3830 }
3831
3832 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3833 assert(layer != nullptr);
3834 input.Connect(layer->GetInputSlot(0));
3835
3836 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3837}
3838
saoste01b8471482018-10-10 09:44:51 +01003839} // namespace armnn_driver