blob: 315089c27f3fb7d0f662db207d2cad6a7ba4fc6b [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000013#include <armnn/utility/IgnoreUnused.hpp>
arovir01b0717b52018-09-05 17:03:25 +010014
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000015#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000016#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010017
Mike Kelly46272802019-08-14 17:00:48 +010018#include "1.0/FullyConnected.hpp"
19
arovir01b0717b52018-09-05 17:03:25 +010020#include <ActivationFunctor.h>
21#include <CpuExecutor.h>
22#include <OperationsUtils.h>
23
24#include <boost/assert.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
/// Mutable state threaded through the conversion of one AndroidNN model to an ArmNN network.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    {}

    /// Backends to query for layer support, in priority order.
    const std::vector<armnn::BackendId> m_Backends;
    /// The ArmNN network being built up during conversion.
    armnn::INetworkPtr m_Network;
    /// For each model operand index, the ArmNN output slot that produces it (nullptr if not yet converted).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    /// Memory pools backing CONSTANT_REFERENCE operands; must outlive the constant tensors that point into them.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
/// Handle to the ArmNN output slot that will feed a layer input, together with its tensor info.
/// An invalid handle (default-constructed, or constructed with valid == false) signals a conversion error.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// @return true if this handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    /// @return Tensor info of the wrapped output slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
72
/// Holds an armnn::ConstTensor for a constant model operand, optionally owning a swizzled
/// copy of the data when a permutation was required.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation to apply to the data; if non-identity the data is copied and swizzled.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (may own a data buffer), movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
107
108} // namespace armnn_driver
109
110///
111/// Utility functions
112///
113
114namespace
115{
116
117using namespace armnn_driver;
118using namespace android::nn;
119
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string for the debug log.
// @param args Arguments substituted into formatStr (forwarded to ALOGD).
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
128
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Iterates over `backends` in order and sets `supported` to true on the first backend whose
// ILayerSupport object accepts the layer; logs (via ALOGD) each rejection reason along the way.
// InvalidArgumentException from the support check is re-thrown with added context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100172
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000173template<typename HalOperand>
174armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100175{
176 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
177}
178
Matthew Bentham912b3622019-05-03 15:49:14 +0100179inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100180{
Matthew Bentham912b3622019-05-03 15:49:14 +0100181 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
182 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
183 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100184}
185
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL                           ||
           type == V1_2::OperandType::TENSOR_BOOL8                   ||
           type == V1_2::OperandType::TENSOR_FLOAT16                 ||
           type == V1_2::OperandType::TENSOR_FLOAT32                 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif
203
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
// NOTE(review): unlike the 1.2 overload this one does not list TENSOR_BOOL8 — confirm whether intentional.
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    return type == V1_3::OperandType::BOOL                           ||
           type == V1_3::OperandType::TENSOR_FLOAT16                 ||
           type == V1_3::OperandType::TENSOR_FLOAT32                 ||
           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
           type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
           type == V1_3::OperandType::TENSOR_INT32;
}

#endif
220
/// HAL 1.0 has no BOOL operand type, so a 1.0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
225
/// A HAL 1.0 operand is never a 1.2-or-later operand.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
230
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

/// Returns true if the HAL 1.2 operand holds a scalar BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif
245
#ifdef ARMNN_ANDROID_NN_V1_3

/// Returns true if the HAL 1.3 operand holds a scalar BOOL.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if a operand is 1_2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
260
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100261template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000262armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
263 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100264 armnn::TensorInfo reshapeInfo)
265{
266 armnn::ReshapeDescriptor reshapeDescriptor;
267 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
268
269 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
270 BOOST_ASSERT(reshapeLayer != nullptr);
271
272 // Attach the input layer to the reshape layer
273 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
274 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
275
276 return *reshapeLayer;
277}
278
/// Connects two inputs to a binary layer, inserting a Reshape in front of the lower-rank
/// input (prepending size-1 dimensions) when the ranks differ, so the backend can broadcast.
/// @param input0 First input (connected to startLayer slot 0).
/// @param input1 Second input (connected to startLayer slot 1).
/// @param startLayer Binary layer to connect to; must not be null.
/// @param data Conversion state (backends for the IsReshapeSupported query, network to add to).
/// @return false if the required reshape is not supported by any backend, true otherwise.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Right-align the smaller shape and pad the leading dimensions with 1s
    // (numpy-style broadcast alignment).
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
371
/// Computes explicit head/tail padding for one spatial dimension from an AndroidNN
/// implicit padding scheme, delegating to AndroidNN's calculateExplicitPadding.
/// @param input   Input size along the dimension.
/// @param kernel  Kernel size along the dimension.
/// @param stride  Stride along the dimension.
/// @param outPadHead [out] Padding before the data.
/// @param outPadTail [out] Padding after the data.
/// @param scheme  AndroidNN padding scheme (e.g. SAME/VALID).
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // calculateExplicitPadding produces non-negative int32 padding; convert (checked) to uint32.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
385
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

/// Dilation-aware overload of CalcPadding (HAL 1.2+ convolutions support dilation).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

/// Padding computation for transpose convolution; note it works from the OUTPUT size
/// and keeps the signed int32 padding values produced by AndroidNN.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
405
Matthew Bentham912b3622019-05-03 15:49:14 +0100406Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100407{
408 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100409 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100410 shape.dimensions = operand.dimensions;
411 shape.scale = operand.scale;
412 shape.offset = operand.zeroPoint;
413 return shape;
414}
415
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

/// Converts a HAL 1.2 operand description into an AndroidNN Shape.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
429
#ifdef ARMNN_ANDROID_NN_V1_3

/// Converts a HAL 1.3 operand description into an AndroidNN Shape.
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
443
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
// @param biasInfo   [in/out] Bias tensor info whose quantization scale(s) may be rewritten.
// @param weightInfo Weight tensor info (per-tensor or per-axis quantized).
// @param inputInfo  Input tensor info providing the input quantization scale.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only correct the bias scale when it is within 1% of the expected value;
            // larger discrepancies are left untouched (and will be rejected downstream).
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
483
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used to move concat axis 2 to axis 1).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (cyclic rotations; RotateTensorLeft and RotateTensorRight are inverses)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100491
492template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000493armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
494 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100495{
496 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000497 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100498
499 BOOST_ASSERT(layer != nullptr);
500
501 // Connect input to swizzle layer
502 input.Connect(layer->GetInputSlot(0));
503
504 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000505 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100506 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
507
508 return *layer;
509}
510
arovir01b0717b52018-09-05 17:03:25 +0100511bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
512 const armnn::TensorShape & outputShape,
513 uint32_t concatDim)
514{
515 // Validate the output shape is correct given the input shapes (which have just been validated)
516 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
517 if (outputShape.GetNumDimensions() != numDimensions)
518 {
519 return Fail("%s: Output shape has wrong number of dimensions", __func__);
520 }
521
522 unsigned int outputSizeAlongConcatenatedDimension = 0;
523 for (unsigned int i = 0; i < inputShapes.size(); i++)
524 {
525 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
526 }
527
528 for (unsigned int i = 0; i < numDimensions; ++i)
529 {
530 if (i == concatDim)
531 {
532 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
533 {
534 return Fail(
535 "%s: Invalid output shape for dimension %d (%d != %d)",
536 __func__,
537 i,
538 outputShape[i],
539 outputSizeAlongConcatenatedDimension);
540 }
541 }
542 else
543 {
544 if (outputShape[i] != inputShapes[0][i])
545 {
546 return Fail("%s: Invalid output shape", __func__);
547 }
548 }
549 }
550
551 return true;
552}
553
/// Returns true if the shape has fewer than 3 dimensions and therefore needs a reshape
/// before the concat permutation logic (which assumes at least 3-D tensors) can apply.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
558
/// Inserts a Transpose layer (with the given mapping) in front of every input and replaces
/// each handle/shape in place with the transposed version. No-op for the identity mapping.
/// @param network     Network to add the transpose layers to.
/// @param inputs      [in/out] Input handles; replaced by the transpose outputs.
/// @param inputShapes [in/out] Shapes; replaced by the transposed shapes.
/// @param mapping     Dimension mapping to apply to every input.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
579
/// Checks every input can be transposed with `mapping` on at least one backend and, if so,
/// performs the swizzle via SwizzleInputs. No-op (returns true) for the identity mapping.
/// @return false as soon as one transpose is unsupported; inputs are left untouched in that case.
bool CheckReshapeSupported(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping,
                           const armnn::TensorInfo& outputInfo)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;

            // NOTE(review): the support query uses `outputInfo` (the final op output) as the
            // transpose's output info rather than the transposed shape of inputs[i] — confirm
            // this is intended.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
613
614
/// Decides whether a concat along `concatDimension` needs the inputs permuted so that
/// Compute Library subtensors can be used, and if so rewrites the dimension and fills in
/// the (forward, inverse) permutation pair.
/// @param numberOfDimensions Tensor rank; must be >= 3.
/// @param concatDimension    [in/out] Concat axis; rewritten when a permutation is required.
/// @param permutationPair    [out] (pre-concat, post-concat) permutations; only set when permuting.
/// @return true if the inputs must be permuted before concatenation.
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        // SwapDim1And2 is its own inverse, so it is used both before and after the concat.
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        // RotateTensorRight undoes RotateTensorLeft after the concat.
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
639
640} // anonymous namespace
641
642namespace armnn_driver
643{
644
//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
655
656} // namespace armnn_driver
657
658///
659/// Utility templates
660///
661
662namespace armnn_driver
663{
664
665using namespace android::nn;
666
/// Looks up the operand feeding input `inputIndex` of `operation` in the model's main subgraph.
/// @param operation Operation whose input list is consulted.
/// @param inputIndex Index into operation.inputs.
/// @param model Model owning the operand table.
/// @param failOnIndexOutOfBounds When true, logs a failure message for an out-of-range index.
/// @return Pointer to the operand, or nullptr if inputIndex is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match the uint32_t/size_t arguments; the previous "%i ... %i" passed a
            // size_t through varargs as int, which is undefined behaviour on LP64 targets.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
689
/// Looks up the operand produced at output `outputIndex` of `operation` in the model's main subgraph.
/// @return Pointer to the operand, or nullptr (with a logged failure) if outputIndex is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match the uint32_t/size_t arguments; the previous "%i ... %i" passed a
        // size_t through varargs as int, which is undefined behaviour on LP64 targets.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
709
/// Resolves the read-only address of a constant operand's data, based on its lifetime.
/// @param operand  Operand to resolve; must be CONSTANT_COPY, CONSTANT_REFERENCE, or
///                 (when `optional` is true) NO_VALUE.
/// @param model    Model providing operandValues for CONSTANT_COPY operands.
/// @param data     Conversion state providing the memory pools for CONSTANT_REFERENCE operands.
/// @param optional When true, a NO_VALUE operand yields nullptr without logging a failure.
/// @return Pointer to the operand data, or nullptr (failure is logged unless the operand was
///         an absent optional).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value is an error: fall through to the failure path.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
756
/// Retrieves the type of the operand feeding input `inputIndex` of `operation`.
/// @param type [out] Receives the operand type on success.
/// @return true on success; false (logged) if the input index is invalid.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}
777
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    // NOTE: NO_VALUE is deliberately treated as "constant" here so that omitted
    // optional tensors can follow the constant-tensor conversion path.
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
790
/// Converts a HAL operand with a known constant value into a ConstTensorPin
/// wrapping an armnn::ConstTensor.
/// @param operand             the HAL operand to convert (must be constant unless @p optional is true)
/// @param dimensionMappings   permutation applied to the tensor data (and, for per-axis
///                            quantized tensors, to the quantization dimension)
/// @param overrideTensorShape if non-null, replaces the shape derived from the operand
/// @param optional            if true, an operand with no value yields a pin that is
///                            invalid but flagged as optional (not an error)
/// @return a valid ConstTensorPin on success; an invalid pin on failure
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Non-optional operands must have a constant lifetime (see IsOperandConstant).
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    // Remap the quantization dimension through the same permutation applied to the data.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    // Shape override is applied after the quantization-dim remap, on purpose.
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
839
840template<typename HalPolicy,
841 typename HalOperation = typename HalPolicy::Operation,
842 typename HalModel = typename HalPolicy::Model>
843ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
844 uint32_t inputIndex,
845 const HalModel& model,
846 const ConversionData& data,
847 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
848 const armnn::TensorShape* overrideTensorShape = nullptr,
849 bool optional = false)
850{
851 using HalOperand = typename HalPolicy::Operand;
852
853 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
854 if (!operand)
855 {
856 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
857 return ConstTensorPin();
858 }
859 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
860 model,
861 data,
862 dimensionMappings,
863 overrideTensorShape,
864 optional);
865}
866
867template<typename HalPolicy,
868 typename OutputType,
869 typename HalOperandType = typename HalPolicy::OperandType,
870 typename HalOperation = typename HalPolicy::Operation,
871 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100872bool GetInputScalar(const HalOperation& operation,
873 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100874 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100875 OutputType& outValue,
876 const HalModel& model,
877 const ConversionData& data)
878{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100879 using HalOperand = typename HalPolicy::Operand;
880
881 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100882 if (!operand)
883 {
884 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
885 }
886
887 if (operand->type != type)
888 {
889 return Fail("%s: unexpected operand type: %s (should be %s)",
890 __func__, toString(operand->type).c_str(), toString(type).c_str());
891 }
892
893 if (operand->location.length != sizeof(OutputType))
894 {
895 return Fail("%s: incorrect operand location length: %i (should be %i)",
896 __func__, operand->location.length, sizeof(OutputType));
897 }
898
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100899 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100900 if (!valueAddress)
901 {
902 return Fail("%s: failed to get address for operand", __func__);
903 }
904
905 outValue = *(static_cast<const OutputType*>(valueAddress));
906 return true;
907}
908
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100909template<typename HalPolicy,
910 typename HalOperation = typename HalPolicy::Operation,
911 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100912bool GetInputInt32(const HalOperation& operation,
913 uint32_t inputIndex,
914 int32_t& outValue,
915 const HalModel& model,
916 const ConversionData& data)
917{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100918 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100919}
920
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100921template<typename HalPolicy,
922 typename HalOperation = typename HalPolicy::Operation,
923 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100924bool GetInputFloat32(const HalOperation& operation,
925 uint32_t inputIndex,
926 float& outValue,
927 const HalModel& model,
928 const ConversionData& data)
929{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100930 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100931}
932
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933template<typename HalPolicy,
934 typename HalOperation = typename HalPolicy::Operation,
935 typename HalOperandType = typename HalPolicy::OperandType,
936 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100937bool GetInputActivationFunctionImpl(const HalOperation& operation,
938 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100939 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100940 ActivationFn& outActivationFunction,
941 const HalModel& model,
942 const ConversionData& data)
943{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100944 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100945 {
946 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
947 __func__,
948 toString(type).c_str(),
949 toString(OperandType::INT32).c_str(),
950 toString(OperandType::TENSOR_INT32).c_str());
951 }
952
953 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100954 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100955 {
956 return Fail("%s: failed to get activation input value", __func__);
957 }
958 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
959 return true;
960}
961
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100962template<typename HalPolicy,
963 typename HalOperation = typename HalPolicy::Operation,
964 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100965bool GetInputActivationFunction(const HalOperation& operation,
966 uint32_t inputIndex,
967 ActivationFn& outActivationFunction,
968 const HalModel& model,
969 const ConversionData& data)
970{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100971 return GetInputActivationFunctionImpl<HalPolicy>(operation,
972 inputIndex,
973 HalPolicy::OperandType::INT32,
974 outActivationFunction,
975 model,
976 data);
arovir01b0717b52018-09-05 17:03:25 +0100977}
978
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100979template<typename HalPolicy,
980 typename HalOperation = typename HalPolicy::Operation,
981 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100982bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
983 uint32_t inputIndex,
984 ActivationFn& outActivationFunction,
985 const HalModel& model,
986 const ConversionData& data)
987{
988 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100989 return GetInputActivationFunctionImpl<HalPolicy>(operation,
990 inputIndex,
991 HalPolicy::OperandType::INT32,
992 outActivationFunction,
993 model,
994 data);
arovir01b0717b52018-09-05 17:03:25 +0100995}
996
997
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100998template<typename HalPolicy,
999 typename HalOperation = typename HalPolicy::Operation,
1000 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001001bool GetOptionalInputActivation(const HalOperation& operation,
1002 uint32_t inputIndex,
1003 ActivationFn& activationFunction,
1004 const HalModel& model,
1005 const ConversionData& data)
1006{
1007 if (operation.inputs.size() <= inputIndex)
1008 {
1009 activationFunction = ActivationFn::kActivationNone;
1010 }
1011 else
1012 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001013 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001014 {
1015 return Fail("%s: Operation has invalid inputs", __func__);
1016 }
1017 }
1018 return true;
1019}
1020
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001021template<typename HalPolicy,
1022 typename ConvolutionDescriptor,
1023 typename HalOperation = typename HalPolicy::Operation,
1024 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001025bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1026 uint32_t dilationXIndex,
1027 ConvolutionDescriptor& descriptor,
1028 const HalModel& model,
1029 const ConversionData& data)
1030{
1031 bool success = true;
1032 if (operation.inputs.size() >= dilationXIndex + 2)
1033 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001034 success &= GetInputScalar<HalPolicy>(operation,
1035 dilationXIndex,
1036 HalPolicy::OperandType::INT32,
1037 descriptor.m_DilationX,
1038 model,
1039 data);
1040 success &= GetInputScalar<HalPolicy>(operation,
1041 dilationXIndex + 1,
1042 HalPolicy::OperandType::INT32,
1043 descriptor.m_DilationY,
1044 model,
1045 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001046 }
1047
1048 return success;
1049}
1050
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001051template<typename HalPolicy,
1052 typename HalOperand = typename HalPolicy::Operand,
1053 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001054bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001055 std::vector<int32_t>& outValues,
1056 const HalModel& model,
1057 const ConversionData& data)
1058{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001059 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001060 {
1061 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1062 }
1063
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001064 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001065 if (!startAddress)
1066 {
1067 return Fail("%s: failed to get operand address", __func__, operand.type);
1068 }
1069
1070 // Check number of bytes is sensible
1071 const uint32_t numBytes = operand.location.length;
1072 if (numBytes % sizeof(int32_t) != 0)
1073 {
1074 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1075 __func__, numBytes, sizeof(int32_t));
1076 }
1077
1078 outValues.resize(numBytes / sizeof(int32_t));
1079 memcpy(outValues.data(), startAddress, numBytes);
1080 return true;
1081}
1082
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001083template<typename HalPolicy,
1084 typename HalOperation = typename HalPolicy::Operation,
1085 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001086bool GetInputPaddingScheme(const HalOperation& operation,
1087 uint32_t inputIndex,
1088 PaddingScheme& outPaddingScheme,
1089 const HalModel& model,
1090 const ConversionData& data)
1091{
1092 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001093 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001094 {
1095 return Fail("%s: failed to get padding scheme input value", __func__);
1096 }
1097
1098 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1099 return true;
1100}
1101
/// Resolves the operand feeding input @p inputIndex of @p operation into a
/// LayerInputHandle that later conversion code can connect to an ArmNN layer.
/// - Model inputs / temporaries / model outputs resolve to the output slot of a
///   previously-converted layer (tracked in data.m_OutputSlotForOperand).
/// - Constant operands are materialised as a new ArmNN Constant layer.
/// Returns an invalid (default-constructed) LayerInputHandle on any failure;
/// dynamic input tensors are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The backend must also accept a Constant layer of this tensor info.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1214
Kevin May42477c12020-03-26 13:34:14 +00001215
#ifdef ARMNN_ANDROID_NN_V1_3
/// V1_3 overload of ConvertToLayerInputHandle: identical logic to the generic
/// template above, but matches the NNAPI 1.3 lifetimes (SUBGRAPH_INPUT /
/// SUBGRAPH_OUTPUT replace MODEL_INPUT / MODEL_OUTPUT).
template<typename HalPolicy>
LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
                                           uint32_t inputIndex,
                                           const::android::hardware::neuralnetworks::V1_3::Model& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The backend must also accept a Constant layer of this tensor info.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
#endif
1328
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001329template<typename HalPolicy,
1330 typename HalOperation = typename HalPolicy::Operation,
1331 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001332bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1333 uint32_t operationOutputIndex,
1334 armnn::IConnectableLayer& layer,
1335 uint32_t layerOutputIndex,
1336 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001337 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001338{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001339 using HalOperand = typename HalPolicy::Operand;
1340
1341 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001342 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1343 {
1344 return false;
1345 }
1346
1347 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1348
1349 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1350 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1351
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001352 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001353
1354 return true;
1355}
1356
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001357template<typename HalPolicy,
1358 typename HalOperation = typename HalPolicy::Operation,
1359 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001360armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1361 uint32_t inputIndex,
1362 const HalModel& model,
1363 ConversionData& data)
1364{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001365 using HalOperand = typename HalPolicy::Operand;
1366
1367 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001368 if (!operand)
1369 {
1370 return armnn::DataLayout::NHWC;
1371 }
1372
1373 if (!IsBool(*operand))
1374 {
1375 return armnn::DataLayout::NHWC;
1376 }
1377
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001378 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001379 if (!valueAddress)
1380 {
1381 return armnn::DataLayout::NHWC;
1382 }
1383
1384 if (*(static_cast<const bool*>(valueAddress)))
1385 {
1386 return armnn::DataLayout::NCHW;
1387 }
1388 else
1389 {
1390 return armnn::DataLayout::NHWC;
1391 }
1392}
1393
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001394template<typename HalPolicy,
1395 typename HalOperation = typename HalPolicy::Operation,
1396 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001397bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1398 uint32_t outputIndex,
1399 armnn::IConnectableLayer& layer,
1400 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001401 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001402{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001403 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1404 outputIndex,
1405 layer,
1406 outputIndex,
1407 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001408 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001409}
1410
/// Shared conversion path for all unary activation operations (ReLU, ReLU1,
/// ReLU6, TanH, ...): validates input/output, checks backend support via
/// IsActivationSupported, adds an ArmNN Activation layer, and wires it up.
/// @param operationName  caller's name, used only in failure messages
/// @param activationDesc the ArmNN descriptor selecting the activation function
/// @return true on success; false (after logging) otherwise
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    // Dynamic output shapes cannot be handled by this conversion path.
    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the configured backends whether they can run this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Register the layer's output slot so downstream operations can consume it.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1459
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001460template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001461 typename HalOperation = typename HalPolicy::Operation,
1462 typename HalModel = typename HalPolicy::Model>
1463bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1464{
1465 armnn::ActivationDescriptor desc;
1466 desc.m_Function = armnn::ActivationFunction::ReLu;
1467
1468 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1469}
1470
1471template<typename HalPolicy,
1472 typename HalOperation = typename HalPolicy::Operation,
1473 typename HalModel = typename HalPolicy::Model>
1474bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1475{
1476 armnn::ActivationDescriptor desc;
1477 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1478 desc.m_A = 1.0f;
1479 desc.m_B = -1.0f;
1480
1481 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1482}
1483
1484template<typename HalPolicy,
1485 typename HalOperation = typename HalPolicy::Operation,
1486 typename HalModel = typename HalPolicy::Model>
1487bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1488{
1489 armnn::ActivationDescriptor desc;
1490 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1491 desc.m_A = 6.0f;
1492
1493 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1494}
1495
1496template<typename HalPolicy,
1497 typename HalOperation = typename HalPolicy::Operation,
1498 typename HalModel = typename HalPolicy::Model>
1499bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1500{
1501 armnn::ActivationDescriptor desc;
1502 desc.m_Function = armnn::ActivationFunction::TanH;
1503 desc.m_A = 1.0f; // android nn does not support tanH parameters
1504 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1505
1506 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1507}
1508
1509template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001510 typename HalOperation = typename HalPolicy::Operation,
1511 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001512bool ConvertPaddings(const HalOperation& operation,
1513 const HalModel& model,
1514 ConversionData& data,
1515 unsigned int rank,
1516 armnn::PadDescriptor& padDescriptor)
1517{
1518 using HalOperand = typename HalPolicy::Operand;
1519
1520 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1521 if (!paddingsOperand)
1522 {
1523 return Fail("%s: Could not read paddings operand", __func__);
1524 }
1525
1526 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1527 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1528 {
1529 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1530 }
1531
1532 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001533 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1534 {
1535 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1536 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001537
1538 // add padding for each dimension of input tensor.
1539 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1540 {
1541 int paddingBeforeInput = paddings[i];
1542 int paddingAfterInput = paddings[i + 1];
1543
1544 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1545 {
1546 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1547 }
1548
1549 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1550 }
1551
1552 return true;
1553}
1554
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN 2D pooling operation (AVERAGE/L2/MAX, selected by poolType)
// into an armnn Pooling2d layer followed by an optional fused activation.
// Supports both parameter layouts defined by the NN API:
//   - explicit padding: >= 10 inputs (pad l/r/t/b, strides, pool size, activation),
//   - implicit padding: fewer inputs (padding scheme, strides, pool size, activation),
// each optionally followed by a data-layout operand on HAL 1.2+ models.
// Returns false (via Fail, which logs) on any validation or support-check failure.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // NHWC is the NN API default; may be overridden below on 1.2+ operands.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ adds an optional data-layout operand at index 10.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ adds an optional data-layout operand at index 7.
        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Implicit padding: derive explicit pad values from the padding scheme
        // and the input's spatial dimensions (resolved through the data layout).
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Ask the configured backends whether they can execute this pooling.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == pooling2dLayer when none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1673
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001674template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001675 typename HalOperation = typename HalPolicy::Operation,
1676 typename HalModel = typename HalPolicy::Model>
1677bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001678{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001679 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001680
1681 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1682 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1683
1684 if (!input0.IsValid() || !input1.IsValid())
1685 {
1686 return Fail("%s: Operation has invalid inputs", __func__);
1687 }
1688
1689 // The FuseActivation parameter is always the input index 2
1690 // and it should be optional
1691 ActivationFn activationFunction;
1692 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1693 {
1694 return Fail("%s: Operation has invalid inputs", __func__);
1695 }
1696
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001697 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001698 if (!outputOperand)
1699 {
1700 return false;
1701 }
1702
1703 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1704 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1705
1706 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1707 if (IsDynamicTensor(outputInfo))
1708 {
1709 return Fail("%s: Dynamic output tensors are not supported", __func__);
1710 }
1711
1712 bool isSupported = false;
1713 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1714 IsAdditionSupported,
1715 data.m_Backends,
1716 isSupported,
1717 inputInfo0,
1718 inputInfo1,
1719 outputInfo);
1720 if (!isSupported)
1721 {
1722 return false;
1723 }
1724
1725 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1726 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1727
1728 if (endLayer != nullptr)
1729 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001730 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001731 if (!isReshapeSupported)
1732 {
1733 return false;
1734 }
1735
Mike Kelly46272802019-08-14 17:00:48 +01001736 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1737 }
1738 else
1739 {
1740 return Fail("%s: ProcessActivation failed", __func__);
1741 }
1742}
1743
1744template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001745 typename HalOperation = typename HalPolicy::Operation,
1746 typename HalModel = typename HalPolicy::Model>
1747bool ConvertArgMinMax(const HalOperation& operation,
1748 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001749 ConversionData& data,
1750 armnn::ArgMinMaxFunction argMinMaxFunction)
1751{
1752 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1753
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001754 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001755 using HalOperandType = typename HalPolicy::OperandType;
1756
1757 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1758
1759 if (!input0.IsValid())
1760 {
1761 return Fail("%s: Operation has invalid inputs", __func__);
1762 }
1763
1764 int32_t axis;
1765 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1766 {
1767 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1768 }
1769
1770 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1771 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1772
1773 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1774 {
1775 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1776 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1777 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1778 return Fail("%s: Axis must be in range [-n, n)", __func__);
1779 }
1780
1781 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1782 if (!output)
1783 {
1784 return Fail("%s: Could not read output 0", __func__);
1785 }
1786
1787 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1788
1789 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1790 if (IsDynamicTensor(outputInfo))
1791 {
1792 return Fail("%s: Dynamic output tensors are not supported", __func__);
1793 }
1794
1795 armnn::ArgMinMaxDescriptor descriptor;
1796 descriptor.m_Function = argMinMaxFunction;
1797 descriptor.m_Axis = axis;
1798
1799 bool isSupported = false;
1800 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1801 IsArgMinMaxSupported,
1802 data.m_Backends,
1803 isSupported,
1804 inputInfo0,
1805 outputInfo,
1806 descriptor);
1807 if (!isSupported)
1808 {
1809 return false;
1810 }
1811
1812 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1813 assert(layer != nullptr);
1814
1815 input0.Connect(layer->GetInputSlot(0));
1816
1817 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1818}
1819
1820template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001821 typename HalOperation = typename HalPolicy::Operation,
1822 typename HalModel = typename HalPolicy::Model>
1823bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001824{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001825 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001826 using HalOperandType = typename HalPolicy::OperandType;
1827
1828 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1829 if (operation.inputs.size() <= 1)
1830 {
1831 return Fail("%s: Operation has insufficient arguments", __func__);
1832 }
1833
1834 // Get inputs and outputs
1835 const std::size_t numInputTensors = operation.inputs.size() - 1;
1836
1837 int32_t concatDim;
1838 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1839 {
1840 return Fail("%s: Operation has invalid inputs", __func__);
1841 }
1842
1843 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1844 if (!outputOperand)
1845 {
1846 return Fail("%s: Operation has no outputs", __func__);
1847 }
1848
1849
1850 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
1851 armnn::TensorShape outputShape = outputInfo.GetShape();
1852
1853 //
1854 // handle negative concat dims along the lines of tensorflow as described here:
1855 // https://www.tensorflow.org/api_docs/python/tf/concat
1856 // "negative axis refers to axis + rank(values)-th dimension"
1857 //
1858 if (concatDim < 0)
1859 {
1860 concatDim += outputShape.GetNumDimensions();
1861 }
1862
1863 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
1864 {
1865 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
1866 }
1867
1868 std::vector<LayerInputHandle> inputHandles;
1869 std::vector<armnn::TensorShape> inputShapes;
1870
1871 inputHandles.reserve(numInputTensors);
1872 inputShapes.reserve(numInputTensors);
1873
1874 bool inputsHaveBeenReshaped = false;
1875 unsigned int tensorDimensionsAdded = 0;
1876
1877 for (uint32_t i = 0; i < numInputTensors; ++i)
1878 {
1879 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
1880 if (!operand)
1881 {
1882 return Fail("%s: Operation has invalid inputs", __func__);
1883 }
1884
Teresa Charlin3b959602019-10-31 17:05:47 +00001885 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
1886 if (!operandInputHandle.IsValid())
1887 {
1888 return Fail("%s: Operation has invalid inputs", __func__);
1889 }
Mike Kellyb8805202019-07-31 17:25:43 +01001890
Teresa Charlin3b959602019-10-31 17:05:47 +00001891 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01001892 if (operandShape.GetNumDimensions() == 0)
1893 {
1894 return Fail("%s: Operands with rank 0 are not supported", __func__);
1895 }
1896
1897 if (RequiresReshape(operandShape))
1898 {
1899 inputsHaveBeenReshaped = true;
1900
1901 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
1902
1903 // Expand the tensor to three dimensions
1904 if (operandShape.GetNumDimensions() == 2)
1905 {
1906 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
1907 tensorDimensionsAdded = 1;
1908 }
1909 else
1910 {
1911 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
1912 tensorDimensionsAdded = 2;
1913 }
1914
Kevin Mayaed08ac2019-12-12 16:33:31 +00001915 armnn::ReshapeDescriptor reshapeDescriptor;
1916 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
1917
1918 bool isSupported = false;
1919 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1920 IsReshapeSupported,
1921 data.m_Backends,
1922 isSupported,
1923 operandInputHandle.GetTensorInfo(),
1924 reshapeInfo,
1925 reshapeDescriptor);
1926 if (!isSupported)
1927 {
1928 return false;
1929 }
1930
Mike Kellyb8805202019-07-31 17:25:43 +01001931 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
1932 *data.m_Network,
1933 operandInputHandle,
1934 reshapeInfo
1935 );
1936
1937 // Point to the reshape operation rather then the input operation
1938 operandShape = reshapeInfo.GetShape();
1939 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
1940 }
1941
1942 inputShapes.emplace_back(operandShape);
1943 inputHandles.emplace_back(operandInputHandle);
1944
1945 if (!inputHandles.back().IsValid())
1946 {
1947 return Fail("%s: Operation has invalid inputs", __func__);
1948 }
1949 }
1950
1951 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
1952
1953 if (inputsHaveBeenReshaped)
1954 {
1955 // Adjust the concatenation dimension by the amount of dimensions added (if any)
1956 concatDim += tensorDimensionsAdded;
1957
1958 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
1959 if (tensorDimensionsAdded == 1)
1960 {
1961 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
1962 }
1963 else if (tensorDimensionsAdded == 2)
1964 {
1965 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
1966 }
1967 }
1968
1969 // Check if permutations is required and get the pair of permutations required for the concatenation.
1970 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
1971 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
1972 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
1973
1974 bool needPermute =
1975 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
1976
1977 if (needPermute)
1978 {
Mike Kelly4a956582020-02-28 10:32:09 +00001979 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
Mike Kellyb8805202019-07-31 17:25:43 +01001980 }
1981
1982 outputInfo.SetShape(outputShape);
1983
1984 // this is no-op for identity swizzles, otherwise it replaces both
1985 // the handles and shapes with the swizzled layer output handles and shapes
Kevin Mayaed08ac2019-12-12 16:33:31 +00001986 if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
1987 {
1988 return false;
1989 }
Mike Kellyb8805202019-07-31 17:25:43 +01001990
1991 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
1992 armnn::OriginsDescriptor concatDescriptor;
1993
1994 try
1995 {
1996 // The concat descriptor is always created across the only supported concat dimension
1997 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
1998 concatDescriptor =
1999 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
2000 }
Derek Lambertib9cb8442019-11-28 13:34:48 +00002001 catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002002 {
2003 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2004 }
2005
2006 // Validate the output shape is correct given the input shapes based on the
2007 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
2008 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2009 {
2010 return Fail("%s: Error validating the output shape for concat", __func__);
2011 }
2012
2013 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2014 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
2015 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
2016
2017 bool isSupported = false;
2018 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2019 IsConcatSupported,
2020 data.m_Backends,
2021 isSupported,
2022 inputTensorInfos,
2023 outputInfo,
2024 concatDescriptor);
2025 if (!isSupported)
2026 {
2027 return false;
2028 }
2029
2030 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2031 assert(layer != nullptr);
2032 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2033
2034 // Connect inputs to the layer
2035 const int numInputSlots = layer->GetNumInputSlots();
2036 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2037 for (int i = 0; i < numInputSlots; ++i)
2038 {
2039 // connect the input directly to the merge (concat) layer
2040 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2041 }
2042
2043 if (needPermute)
2044 {
Mike Kelly4a956582020-02-28 10:32:09 +00002045 armnn::TransposeDescriptor transposeDesc;
2046 transposeDesc.m_DimMappings = permutationPair.second;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002047
2048 bool isSupported = false;
2049 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002050 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002051 data.m_Backends,
2052 isSupported,
2053 layer->GetOutputSlot(0).GetTensorInfo(),
2054 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002055 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002056 if (!isSupported)
2057 {
2058 return false;
2059 }
Mike Kellyb8805202019-07-31 17:25:43 +01002060 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Mike Kelly4a956582020-02-28 10:32:09 +00002061 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
2062 layer->GetOutputSlot(0),
2063 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002064 layer = &deswizzleLayer;
2065 }
2066
2067 if (inputsHaveBeenReshaped)
2068 {
2069 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
2070
2071 // Undo the reshape knowing the amount of dimensions added
2072 if (tensorDimensionsAdded == 1)
2073 {
2074 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
2075 afterConcatInfo.GetShape()[2] }));
2076 }
2077 else if (tensorDimensionsAdded == 2)
2078 {
2079 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
2080 }
2081
Kevin Mayaed08ac2019-12-12 16:33:31 +00002082 armnn::ReshapeDescriptor reshapeDescriptor;
2083 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
2084
2085 bool isSupported = false;
2086 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2087 IsReshapeSupported,
2088 data.m_Backends,
2089 isSupported,
2090 layer->GetOutputSlot(0).GetTensorInfo(),
2091 afterConcatInfo,
2092 reshapeDescriptor);
2093 if (!isSupported)
2094 {
2095 return false;
2096 }
2097
Mike Kellyb8805202019-07-31 17:25:43 +01002098 layer = &AddReshapeLayer(
2099 *data.m_Network,
2100 layer->GetOutputSlot(0),
2101 afterConcatInfo
2102 );
2103 }
2104
2105 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2106}
2107
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN CONV_2D operation into an armnn Convolution2d layer
// with constant weights and bias, plus an optional fused activation.
// Supports both NN API parameter layouts:
//   - 10 inputs: explicit padding (pad l/r/t/b, strides, activation),
//   - 7 inputs:  implicit padding (padding scheme, strides, activation),
// and assumes the NHWC data layout throughout.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale as
    // required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding variant: pad values are provided directly.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding variant: derive pad values from the padding scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are laid out as [ output, height, width, input ] and the
        // input as NHWC, hence indices 2/1 for the spatial dimensions.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2227
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002228template<typename HalPolicy,
2229 typename HalOperation = typename HalPolicy::Operation,
2230 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002231bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2232{
2233 using HalOperand = typename HalPolicy::Operand;
2234 using HalOperandType = typename HalPolicy::OperandType;
2235
2236 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2237 if (!input.IsValid() )
2238 {
2239 return Fail("%s: Operation has invalid inputs", __func__);
2240 }
2241
2242 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2243 unsigned int rank = inputInfo.GetNumDimensions();
2244 if (rank != 4)
2245 {
2246 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2247 }
2248
2249 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2250 if (!output)
2251 {
2252 return Fail("%s: Could not read output 0", __func__);
2253 }
2254
2255 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2256 if (IsDynamicTensor(outputInfo))
2257 {
2258 return Fail("%s: Dynamic output tensors are not supported", __func__);
2259 }
2260
2261 armnn::DepthToSpaceDescriptor descriptor;
2262
2263 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2264 if (descriptor.m_BlockSize <= 1)
2265 {
2266 return Fail("%s: Block size must be at least 1 in all dimensions");
2267 }
2268
2269 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002270 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002271 {
2272 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2273 }
2274
2275 bool isSupported = false;
2276 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2277 IsDepthToSpaceSupported,
2278 data.m_Backends,
2279 isSupported,
2280 inputInfo,
2281 outputInfo,
2282 descriptor);
2283 if (!isSupported)
2284 {
2285 return false;
2286 }
2287
2288 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2289 assert(layer != nullptr);
2290 input.Connect(layer->GetInputSlot(0));
2291
2292 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2293}
2294
2295template<typename HalPolicy,
2296 typename HalOperation = typename HalPolicy::Operation,
2297 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01002298bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
2299{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002300 using HalOperand = typename HalPolicy::Operand;
2301 using HalOperandType = typename HalPolicy::OperandType;
2302
2303 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002304
2305 if (!input.IsValid())
2306 {
2307 return Fail("%s: Operation has invalid inputs", __func__);
2308 }
2309
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002310 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002311
2312 if (!output)
2313 {
2314 return Fail("%s: Could not read output 0", __func__);
2315 }
2316
2317 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002318 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002319
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002320 if (IsDynamicTensor(outputInfo))
2321 {
2322 return Fail("%s: Dynamic output tensors are not supported", __func__);
2323 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01002324
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002325 // ArmNN does not currently support non-fixed weights or bias
Mike Kellyb5fdf382019-06-11 16:35:25 +01002326 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002327 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002328
2329 if (weightsOperand == nullptr)
2330 {
2331 return Fail("%s: Operand is invalid", __func__);
2332 }
2333 armnn::DepthwiseConvolution2dDescriptor desc;
2334 desc.m_DataLayout = armnn::DataLayout::NHWC;
2335
Mike Kellyb5fdf382019-06-11 16:35:25 +01002336 // Reinterpret weight data as [ H, W, I, M ]
2337 armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
2338 weightsOperand->dimensions[2],
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002339 inputInfo.GetShape()[3],
2340 weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
Mike Kellyb5fdf382019-06-11 16:35:25 +01002341
2342 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
2343 const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
2344
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002345 const ConstTensorPin weightsPin =
2346 ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
2347 1,
2348 model,
2349 data,
2350 HWIMToMIHW,
2351 &weightsShape);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002352
2353 // Bias is a 1D tensor
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002354 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002355
2356 if (!weightsPin.IsValid() || !biasPin.IsValid())
2357 {
2358 return Fail("%s: Operation has invalid inputs", __func__);
2359 }
2360
2361 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2362 armnn::ConstTensor bias = biasPin.GetConstTensor();
2363 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2364
2365 ActivationFn activation;
2366
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002367 if (operation.inputs.size() == 11)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002368 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002369 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2370 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2371 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2372 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2373 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2374 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002375 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002376 {
2377 return Fail("%s: Operation has invalid inputs", __func__);
2378 }
2379 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002380 else if (operation.inputs.size() == 8)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002381 {
2382 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002383 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2384 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2385 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002386 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002387 {
2388 return Fail("%s: Operation has invalid inputs", __func__);
2389 }
2390
2391 const uint32_t kernelX = weights.GetShape()[3];
2392 const uint32_t kernelY = weights.GetShape()[2];
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002393 const uint32_t inputX = inputInfo.GetShape()[2];
2394 const uint32_t inputY = inputInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002395
2396 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2397 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2398 }
2399 else
2400 {
2401 return Fail("%s: Unsupported number of operation inputs", __func__);
2402 }
2403
2404 desc.m_BiasEnabled = true;
2405 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2406
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002407 bool isSupported = false;
2408 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2409 IsDepthwiseConvolutionSupported,
2410 data.m_Backends,
2411 isSupported,
2412 inputInfo,
2413 outputInfo,
2414 desc,
2415 weights.GetInfo(),
2416 biases);
2417 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002418 {
2419 return false;
2420 }
2421
2422 armnn::IConnectableLayer* startLayer =
2423 data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2424 if (!startLayer)
2425 {
2426 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
2427 }
2428
2429 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
2430 if (!endLayer)
2431 {
2432 return Fail("%s: ProcessActivation failed", __func__);
2433 }
2434
2435 input.Connect(startLayer->GetInputSlot(0));
2436
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002437 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01002438}
2439
Mike Kelly3c673942019-07-25 09:26:06 +01002440template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002441 typename HalOperation = typename HalPolicy::Operation,
2442 typename HalModel = typename HalPolicy::Model>
2443bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002444{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002445 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002446
2447 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2448 if (!input.IsValid())
2449 {
2450 return Fail("%s: Operation has invalid input", __func__);
2451 }
2452
Sadik Armagan98c0f662019-11-21 15:54:36 +00002453 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2454 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2455 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2456 {
2457 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2458 }
2459
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002460 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002461 if (!outputOperand)
2462 {
2463 return Fail("%s: Operation has invalid outputs", __func__);
2464 }
2465
2466 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2467 if (IsDynamicTensor(outputInfo))
2468 {
2469 return Fail("%s: Dynamic output tensors are not supported", __func__);
2470 }
2471
2472 bool isSupported = false;
2473 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2474 IsDequantizeSupported,
2475 data.m_Backends,
2476 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002477 inputInfo,
2478 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002479 if (!isSupported)
2480 {
2481 return false;
2482 }
2483
2484 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2485 assert(layer != nullptr);
2486 input.Connect(layer->GetInputSlot(0));
2487
2488 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2489}
2490
2491template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002492 typename HalOperation = typename HalPolicy::Operation,
2493 typename HalModel = typename HalPolicy::Model>
2494bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002495{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002496 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002497
2498 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2499 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2500
2501 if (!input0.IsValid() || !input1.IsValid())
2502 {
2503 return Fail("%s: Operation has invalid inputs", __func__);
2504 }
2505
2506 // The FuseActivation parameter is always the input index 2
2507 // and it should be optional
2508 ActivationFn activationFunction;
2509 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2510 {
2511 return Fail("%s: Operation has invalid inputs", __func__);
2512 }
2513
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002514 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002515 if (!output)
2516 {
2517 return Fail("%s: Could not read output 0", __func__);
2518 }
2519
2520 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2521 if (IsDynamicTensor(outputInfo))
2522 {
2523 return Fail("%s: Dynamic output tensors are not supported", __func__);
2524 }
2525
2526 bool isSupported = false;
2527 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2528 IsDivisionSupported,
2529 data.m_Backends,
2530 isSupported,
2531 input0.GetTensorInfo(),
2532 input1.GetTensorInfo(),
2533 outputInfo);
2534 if (!isSupported)
2535 {
2536 return false;
2537 }
2538
2539 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2540 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2541
2542 if (endLayer)
2543 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002544 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002545 if (!isReshapeSupported)
2546 {
2547 return false;
2548 }
2549
Mike Kelly46272802019-08-14 17:00:48 +01002550 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2551 }
2552 return Fail("%s: ProcessActivation failed", __func__);
2553}
2554
2555template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002556 typename HalOperation = typename HalPolicy::Operation,
2557 typename HalModel = typename HalPolicy::Model>
2558bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002559{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002560 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002561
2562 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2563 if (!input.IsValid())
2564 {
2565 return Fail("%s: Operation has invalid inputs", __func__);
2566 }
2567
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002568 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002569 if (!outputOperand)
2570 {
2571 return Fail("%s: Operation has invalid outputs", __func__);
2572 }
2573
2574 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2575 if (IsDynamicTensor(outputInfo))
2576 {
2577 return Fail("%s: Dynamic output tensors are not supported", __func__);
2578 }
2579
2580 bool isSupported = false;
2581 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2582 IsFloorSupported,
2583 data.m_Backends,
2584 isSupported,
2585 input.GetTensorInfo(),
2586 outputInfo);
2587 if (!isSupported)
2588 {
2589 return false;
2590 }
2591
2592 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2593 assert(layer != nullptr);
2594 input.Connect(layer->GetInputSlot(0));
2595
2596 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2597}
2598
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a 1.0 operand can never be QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2603
Kevin May42477c12020-03-26 13:34:14 +00002604#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002605
// Returns true if the HAL 1.2 operand holds symmetric 8-bit quantized tensor data.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}
2610
2611#endif
2612
Kevin May42477c12020-03-26 13:34:14 +00002613#ifdef ARMNN_ANDROID_NN_V1_3
2614
// Returns true if the HAL 1.3 operand holds symmetric 8-bit quantized tensor data.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}
2619
2620#endif
2621
// Outcome of DequantizeIfRequired(): whether the weights operand needed manual dequantization.
enum class DequantizeStatus
{
    SUCCESS,         // weights came from a DEQUANTIZE op and were dequantized into a new buffer
    NOT_REQUIRED,    // weights are already constant (or no matching DEQUANTIZE op was found)
    INVALID_OPERAND  // the weights operand could not be read at all
};
2628
// <dequantized float buffer, buffer size in bytes, tensor info describing the buffer, status>
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2630
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002631template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002632 typename HalOperation = typename HalPolicy::Operation,
2633 typename HalModel = typename HalPolicy::Model>
2634DequantizeResult DequantizeIfRequired(size_t operand_index,
2635 const HalOperation& operation,
2636 const HalModel& model,
2637 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002638{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002639 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002640
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002641 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002642 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002643 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002644 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002645 }
2646
2647 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2648 {
2649 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002650 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002651 }
2652
2653 const size_t weightsInputIndex = operation.inputs[operand_index];
2654
2655 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2656 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002657 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002658 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002659 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002660 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002661 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2662 {
2663 continue;
2664 }
2665
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002666 size_t outOpIndex = weightsInputIndex + 1;
2667 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002668 {
2669 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002670 }
2671
2672 if (outOpIndex != weightsInputIndex)
2673 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002674 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002675 }
2676
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002677 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002678 BOOST_ASSERT(operand);
2679
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002680 if (!IsQSymm8(*operand))
2681 {
2682 // Only supporting dequantize from QSYMM8 to FLOAT
2683 break;
2684 }
2685
2686 // Allocate a new buffer for the dequantized data and manually dequantize
2687 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2688 if (!startValue)
2689 {
2690 // Failed to get the operand address
2691 break;
2692 }
2693
2694 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2695 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002696 const float quantizationScale = operand->scale;
2697
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002698 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2699 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2700 {
2701 float* dstPtr = dequantizedBuffer.get();
2702 BOOST_ASSERT(dstPtr);
2703 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2704 }
2705
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002706 // Construct tensor info for dequantized ConstTensor
2707 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2708 operand->dimensions.data(),
2709 armnn::DataType::Float32);
2710
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002711 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2712 std::move(tensorInfo),
2713 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002714 }
2715
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002716 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002717}
2718
2719template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002720 typename HalOperation = typename HalPolicy::Operation,
2721 typename HalModel = typename HalPolicy::Model>
2722ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2723 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002724 const ConversionData& data,
2725 size_t operandIndex,
2726 bool optional = false)
2727{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002728 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2729
2730 DequantizeStatus status = std::get<3>(dequantized);
2731 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002732 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002733 case DequantizeStatus::INVALID_OPERAND:
2734 {
2735 // return invalid const tensor pin
2736 return ConstTensorPin();
2737 }
2738 case DequantizeStatus::NOT_REQUIRED:
2739 {
2740 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2741 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2742 }
2743 case DequantizeStatus::SUCCESS:
2744 default:
2745 {
2746 return ConstTensorPin(
2747 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2748 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002749 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002750}
2751
2752
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Weights (input 1) may themselves be the output of a DEQUANTIZE op and are resolved via
// DequantizeAndMakeConstTensorPin; bias (input 2) must be a constant 1D tensor.
// Inputs with more than 2 dimensions are flattened with an inserted Reshape layer.
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may need manual dequantization (QSYMM8 constants behind a DEQUANTIZE op)
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to the 2D [batch, inputSize] form FullyConnected expects;
    // FlattenFullyConnectedInput throws if the shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    // Cross-check that flattened input x weights actually yields the declared output shape
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    // Append the fused activation (if any) after the fully connected layer
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape to flatten the >2D input before it reaches the FC layer
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2871
2872template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002873 typename HalOperation = typename HalPolicy::Operation,
2874 typename HalModel = typename HalPolicy::Model>
2875bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002876{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002877 using HalOperand = typename HalPolicy::Operand;
2878
Mike Kelly999e2092019-08-15 10:46:46 +01002879 if (operation.inputs.size() != 1)
2880 {
2881 return Fail("%s: Optional inputs are not supported", __func__);
2882 }
2883
Mike Kelly46272802019-08-14 17:00:48 +01002884 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2885 if (!input.IsValid())
2886 {
2887 return Fail("%s: Operation has invalid inputs", __func__);
2888 }
2889
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002890 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002891 if (!output)
2892 {
2893 return Fail("%s: Could not read output 0", __func__);
2894 }
2895
2896 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2897 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2898
2899 if (IsDynamicTensor(outputInfo))
2900 {
2901 return Fail("%s: Dynamic output tensors are not supported", __func__);
2902 }
2903 if (outputInfo.GetNumDimensions() != 4u)
2904 {
2905 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2906 }
2907
2908 armnn::L2NormalizationDescriptor desc;
2909 desc.m_DataLayout = armnn::DataLayout::NHWC;
2910
2911 bool isSupported = false;
2912 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2913 IsL2NormalizationSupported,
2914 data.m_Backends,
2915 isSupported,
2916 inputInfo,
2917 outputInfo,
2918 desc);
2919 if (!isSupported)
2920 {
2921 return false;
2922 }
2923
2924 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2925 assert(layer != nullptr);
2926 input.Connect(layer->GetInputSlot(0));
2927
2928 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2929}
2930
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel, LocalBrightness method) in NHWC layout.
// Only the 5-input form (input, radius, bias k, alpha, beta) is handled.
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                      const HalModel& model,
                                      ConversionData& data)
{
    // The optional axis input (6-input form) is not supported
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Inputs 1-4 are the LRN parameters: radius (INT32), bias k, alpha, beta (FLOAT32)
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3008
3009template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003010 typename HalOperation = typename HalPolicy::Operation,
3011 typename HalModel = typename HalPolicy::Model>
3012bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003013{
Mike Kelly46272802019-08-14 17:00:48 +01003014 armnn::ActivationDescriptor desc;
3015 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3016
3017 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3018}
3019
3020template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003021 typename HalOperation = typename HalPolicy::Operation,
3022 typename HalModel = typename HalPolicy::Model>
3023bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003024{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003025 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003026
3027 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3028 if (!input.IsValid())
3029 {
3030 return Fail("%s: Operation has invalid inputs", __func__);
3031 }
3032
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003033 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003034 if (!output)
3035 {
3036 return Fail("%s: Could not read output 0", __func__);
3037 }
3038
3039 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3040 if (IsDynamicTensor(outputInfo))
3041 {
3042 return Fail("%s: Dynamic output tensors are not supported", __func__);
3043 }
3044
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003045 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003046 if (!axisOperand)
3047 {
3048 return Fail("%s: Could not read input 1", __func__);
3049 }
3050
3051 std::vector<int32_t> axis;
3052 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3053 {
3054 return Fail("%s: Input 1 has invalid values", __func__);
3055 }
3056
3057 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3058
3059 // Convert the axis to unsigned int and remove duplicates.
3060 unsigned int rank = inputInfo.GetNumDimensions();
3061 std::set<unsigned int> uniqueAxis;
3062 std::transform(axis.begin(), axis.end(),
3063 std::inserter(uniqueAxis, uniqueAxis.begin()),
3064 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3065
3066 // Get the "keep dims" flag.
3067 int32_t keepDims = 0;
3068 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3069 {
3070 return Fail("%s: Could not read input 2", __func__);
3071 }
3072
3073 armnn::MeanDescriptor descriptor;
3074 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3075 descriptor.m_KeepDims = keepDims > 0;
3076
3077 bool isSupported = false;
3078 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3079 IsMeanSupported,
3080 data.m_Backends,
3081 isSupported,
3082 inputInfo,
3083 outputInfo,
3084 descriptor);
3085 if (!isSupported)
3086 {
3087 return false;
3088 }
3089
3090 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3091 assert(layer != nullptr);
3092 input.Connect(layer->GetInputSlot(0));
3093
3094 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3095}
3096
3097template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003098 typename HalOperation = typename HalPolicy::Operation,
3099 typename HalModel = typename HalPolicy::Model>
3100bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003101{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003102 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003103
3104 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3105 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3106
3107 if (!input0.IsValid() || !input1.IsValid())
3108 {
3109 return Fail("%s: Operation has invalid inputs", __func__);
3110 }
3111
3112 // The FuseActivation parameter is always the input index 2
3113 // and it should be optional
3114 ActivationFn activationFunction;
3115 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3116 {
3117 return Fail("%s: Operation has invalid inputs", __func__);
3118 }
3119
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003120 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003121
3122 if (outputOperand == nullptr)
3123 {
3124 return false;
3125 }
3126
3127 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3128 if (IsDynamicTensor(outputInfo))
3129 {
3130 return Fail("%s: Dynamic output tensors are not supported", __func__);
3131 }
3132
3133 bool isSupported = false;
3134 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3135 IsMultiplicationSupported,
3136 data.m_Backends,
3137 isSupported,
3138 input0.GetTensorInfo(),
3139 input1.GetTensorInfo(),
3140 outputInfo);
3141 if (!isSupported)
3142 {
3143 return false;
3144 }
3145
3146 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3147 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3148
3149 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3150 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3151
3152 if (endLayer != nullptr)
3153 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003154 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003155 if (!isReshapeSupported)
3156 {
3157 return false;
3158 }
3159
Mike Kelly46272802019-08-14 17:00:48 +01003160 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3161 }
3162 else
3163 {
3164 return Fail("%s: ProcessActivation failed", __func__);
3165 }
3166}
3167
3168template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003169 typename HalOperation = typename HalPolicy::Operation,
3170 typename HalModel = typename HalPolicy::Model>
3171bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003172{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003173 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003174
Mike Kelly3c673942019-07-25 09:26:06 +01003175 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3176 if (!input.IsValid())
3177 {
3178 return Fail("%s: Operation has invalid inputs", __func__);
3179 }
3180
3181 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3182 unsigned int rank = inputInfo.GetNumDimensions();
3183
3184 armnn::PadDescriptor descriptor;
3185 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3186 {
3187 return Fail("%s: Could not convert paddings", __func__);
3188 }
3189
3190 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3191 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3192 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Derek Lamberti1a38cda2020-01-10 17:28:20 +00003193 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
Mike Kelly3c673942019-07-25 09:26:06 +01003194 {
3195 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3196 }
3197
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003198 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003199 if (!output)
3200 {
3201 return Fail("%s: Could not read output", __func__);
3202 }
3203
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003204 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003205 if (IsDynamicTensor(outputInfo))
3206 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003207 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003208 }
3209
3210 bool isSupported = false;
3211 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3212 IsPadSupported,
3213 data.m_Backends,
3214 isSupported,
3215 inputInfo,
3216 outputInfo,
3217 descriptor);
3218 if (!isSupported)
3219 {
3220 return false;
3221 }
3222
3223 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3224 assert(layer != nullptr);
3225 input.Connect(layer->GetInputSlot(0));
3226 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3227
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003228 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003229}
3230
Mike Kelly0a879362019-07-29 16:56:31 +01003231template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003232 typename HalOperation = typename HalPolicy::Operation,
3233 typename HalModel = typename HalPolicy::Model>
3234bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003235{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003236 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003237
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003238 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3239 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3240 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003241
3242 if (inputOperand == nullptr
3243 || requestedShapeOperand == nullptr
3244 || outputOperand == nullptr)
3245 {
3246 return Fail("%s: Operation has invalid inputs", __func__);
3247 }
3248
3249 if (requestedShapeOperand->dimensions.size() != 1)
3250 {
3251 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3252 __func__, requestedShapeOperand->dimensions.size());
3253 }
3254
3255 std::vector<int32_t> targetDimensions;
3256 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3257 {
3258 return Fail("%s: Could not read values of input 1", __func__);
3259 }
3260
3261 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3262
3263 Shape requestedShape;
3264 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3265 // function that resolves these values into a fully specified tensor shape.
3266 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3267 {
3268 return Fail("%s: Failed to resolve the requested shape", __func__);
3269 }
3270
3271 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3272 if (!SameShape(requestedShape, outputOperandShape))
3273 {
3274 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3275 }
3276
3277 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3278 if (!input.IsValid())
3279 {
3280 return Fail("%s: Could not read input 0", __func__);
3281 }
3282
3283 armnn::ReshapeDescriptor reshapeDescriptor;
3284 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3285 requestedShape.dimensions.data());
3286
3287 bool isSupported = false;
3288 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3289 IsReshapeSupported,
3290 data.m_Backends,
3291 isSupported,
3292 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003293 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003294 reshapeDescriptor);
3295 if (!isSupported)
3296 {
3297 return false;
3298 }
3299
3300 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3301 assert(layer != nullptr);
3302 input.Connect(layer->GetInputSlot(0));
3303
3304 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3305}
3306
3307template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003308 typename HalOperation = typename HalPolicy::Operation,
3309 typename HalModel = typename HalPolicy::Model>
3310bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003311{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003312 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003313
Mike Kelly0a879362019-07-29 16:56:31 +01003314 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3315 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3316
3317 if (!input0.IsValid() || !input1.IsValid())
3318 {
3319 return Fail("%s: Operation has invalid inputs", __func__);
3320 }
3321
3322 // The FuseActivation parameter is always the input index 2
3323 // and it should be optional
3324 ActivationFn activationFunction;
3325 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3326 {
3327 return Fail("%s: Operation has invalid inputs", __func__);
3328 }
3329
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003330 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003331 if (!output)
3332 {
3333 return Fail("%s: Could not read output 0", __func__);
3334 }
3335
3336 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3337 if (IsDynamicTensor(outputInfo))
3338 {
3339 return Fail("%s: Dynamic output tensors are not supported", __func__);
3340 }
3341
3342 bool isSupported = false;
3343 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3344 IsSubtractionSupported,
3345 data.m_Backends,
3346 isSupported,
3347 input0.GetTensorInfo(),
3348 input1.GetTensorInfo(),
3349 outputInfo);
3350 if (!isSupported)
3351 {
3352 return false;
3353 }
3354
3355 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3356 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3357
3358 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3359 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3360
3361 if (endLayer)
3362 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003363 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003364 if (!isReshapeSupported)
3365 {
3366 return false;
3367 }
Mike Kelly0a879362019-07-29 16:56:31 +01003368 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3369 }
3370
3371 return Fail("%s: ProcessActivation failed", __func__);
3372}
3373
Finn Williams23b87b32019-07-30 11:44:05 +01003374template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003375 typename HalOperation = typename HalPolicy::Operation,
3376 typename HalModel = typename HalPolicy::Model>
3377bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003378{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003379 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003380
3381 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3382 if (!input.IsValid())
3383 {
3384 return Fail("%s: Operation has invalid inputs", __func__);
3385 }
3386
3387 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3388 unsigned int rank = inputInfo.GetNumDimensions();
3389 if (rank > 4)
3390 {
3391 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3392 }
3393
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003394 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003395 if (!output)
3396 {
3397 return Fail("%s: Could not read output 0", __func__);
3398 }
3399
3400 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3401 {
3402 return Fail("%s: Dynamic output tensors are not supported", __func__);
3403 }
3404
3405 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3406 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003407 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003408
3409 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3410
3411 std::vector<int32_t> axis;
3412 if (!axisOperand)
3413 {
3414 axis.assign(dimensionSequence,
3415 dimensionSequence + rank);
3416 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003417 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003418 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003419 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003420 }
3421
3422 std::vector<uint32_t> outputDims;
3423 for (unsigned int i = 0; i < rank; i++)
3424 {
3425 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3426 auto currentDimension = inputInfo.GetShape()[i];
3427 if (skipSqueeze || currentDimension != 1)
3428 {
3429 outputDims.push_back(currentDimension);
3430 }
3431 }
3432
3433 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3434
3435 armnn::TensorInfo outputInfo = inputInfo;
3436 outputInfo.SetShape(outShape);
3437
3438 armnn::ReshapeDescriptor reshapeDesc;
3439 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3440
3441 bool isSupported = false;
3442 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3443 IsReshapeSupported,
3444 data.m_Backends,
3445 isSupported,
3446 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003447 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003448 reshapeDesc);
3449 if (!isSupported)
3450 {
3451 return false;
3452 }
3453
3454 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3455 assert(layer != nullptr);
3456 input.Connect(layer->GetInputSlot(0));
3457
3458 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3459}
3460
3461template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003462 typename HalOperation = typename HalPolicy::Operation,
3463 typename HalModel = typename HalPolicy::Model>
3464bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003465{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003466 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003467
3468 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3469 if (!input.IsValid())
3470 {
3471 return Fail("%s: Operation has invalid inputs", __func__);
3472 }
3473
3474 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3475 unsigned int rank = inputInfo.GetNumDimensions();
3476 if (rank > 4)
3477 {
3478 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3479 }
3480
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003481 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003482 if (!output)
3483 {
3484 return Fail("%s: Could not read output 0", __func__);
3485 }
3486
3487 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3488 if (IsDynamicTensor(outputInfo))
3489 {
3490 return Fail("%s: Dynamic output tensors are not supported", __func__);
3491 }
3492
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003493 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3494 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3495 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003496
3497 std::vector<int32_t> beginValues;
3498 std::vector<int32_t> endValues;
3499 std::vector<int32_t> stridesValues;
3500
3501 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003502 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003503 {
3504 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3505 {
3506 return false;
3507 }
3508
3509 if (operandValues.size() != rank)
3510 {
3511 return false;
3512 }
3513
3514 return true;
3515 };
3516
3517 if (!ValidateInputOperands(*beginOperand, beginValues)
3518 || !ValidateInputOperands(*endOperand, endValues)
3519 || !ValidateInputOperands(*stridesOperand, stridesValues))
3520 {
3521 return Fail("%s: Operation has invalid input operand", __func__);
3522 }
3523
3524 // Stride cannot have value '0'
3525 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3526 {
3527 return Fail("%s: Stride must be non-zero value.", __func__);
3528 }
3529
3530 armnn::StridedSliceDescriptor descriptor;
3531 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3532 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3533 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3534 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3535
3536 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3537 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3538 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3539 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3540 {
3541 return Fail("%s: Operation has invalid inputs", __func__);
3542 }
3543
3544 bool isSupported = false;
3545 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3546 IsStridedSliceSupported,
3547 data.m_Backends,
3548 isSupported,
3549 inputInfo,
3550 outputInfo,
3551 descriptor);
3552 if (!isSupported)
3553 {
3554 return false;
3555 }
3556
3557 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3558 assert(layer != nullptr);
3559 input.Connect(layer->GetInputSlot(0));
3560
3561 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3562}
3563
3564template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003565 typename HalOperation = typename HalPolicy::Operation,
3566 typename HalModel = typename HalPolicy::Model>
3567bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003568{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003569 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003570
3571 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3572 if (!input.IsValid())
3573 {
3574 return Fail("%s: Operation has invalid inputs", __func__);
3575 }
3576
3577 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3578 unsigned int rank = inputInfo.GetNumDimensions();
3579 if (rank > 4)
3580 {
3581 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3582 }
3583
3584 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3585 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003586 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003587
3588 std::vector<int32_t> perm(rank);
3589 if (!permOperand)
3590 {
3591 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3592 for (unsigned int i = rank; i > 0; i--)
3593 {
3594 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3595 }
3596 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003597 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003598 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003599 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003600 }
3601
3602 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3603
Mike Kelly4a956582020-02-28 10:32:09 +00003604 armnn::TransposeDescriptor transposeDesc;
3605 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003606
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003607 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003608 if (!output)
3609 {
3610 return Fail("%s: Could not read output 0", __func__);
3611 }
3612
3613 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003614 if (IsDynamicTensor(outputInfo))
3615 {
3616 return Fail("%s: Dynamic output tensors are not supported", __func__);
3617 }
3618
Mike Kelly46272802019-08-14 17:00:48 +01003619
3620 bool isSupported = false;
3621 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003622 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003623 data.m_Backends,
3624 isSupported,
3625 inputInfo,
3626 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003627 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003628 if (!isSupported)
3629 {
3630 return false;
3631 }
3632
Mike Kelly4a956582020-02-28 10:32:09 +00003633 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003634 assert(layer != nullptr);
3635 input.Connect(layer->GetInputSlot(0));
3636
3637 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3638}
3639
3640template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003641 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003642 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003643 typename HalModel = typename HalPolicy::Model>
3644bool ConvertBatchToSpaceNd(const HalOperation& operation,
3645 const HalModel& model,
3646 ConversionData& data)
3647{
Finn Williams23b87b32019-07-30 11:44:05 +01003648
3649 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3650 if (!input.IsValid())
3651 {
3652 return Fail("%s: Operation has invalid inputs", __func__);
3653 }
3654
3655 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3656 if (!output)
3657 {
3658 return Fail("%s: Could not read output 0", __func__);
3659 }
3660
3661 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3662 if (IsDynamicTensor(outputInfo))
3663 {
3664 return Fail("%s: Dynamic output tensors are not supported", __func__);
3665 }
3666
3667 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3668 if (!blockOperand)
3669 {
3670 return Fail("%s: Could not read input 1", __func__);
3671 }
3672
3673 // Convert the block operand to int32
3674 std::vector<int32_t> block;
3675 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3676 {
3677 return Fail("%s: Input 1 has invalid values", __func__);
3678 }
3679
3680 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3681
3682 unsigned int rank = inputInfo.GetNumDimensions();
3683 if (rank != 4)
3684 {
3685 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3686 }
3687
3688 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3689 {
3690 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3691 " greater than or equal to 1", __func__);
3692 }
3693
3694 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3695 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3696 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3697
Kevin May42477c12020-03-26 13:34:14 +00003698 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01003699 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003700 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003701 }
3702 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3703 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3704
3705 bool isSupported = false;
3706 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3707 IsBatchToSpaceNdSupported,
3708 data.m_Backends,
3709 isSupported,
3710 inputInfo,
3711 outputInfo,
3712 batchToSpaceNdDesc);
3713 if (!isSupported)
3714 {
3715 return false;
3716 }
3717
3718 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3719 assert(layer != nullptr);
3720 input.Connect(layer->GetInputSlot(0));
3721
3722 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3723}
Mike Kelly0a879362019-07-29 16:56:31 +01003724
Finn Williamsd74c5052019-07-30 17:06:00 +01003725template<typename HalPolicy,
3726 typename HalOperation = typename HalPolicy::Operation,
3727 typename HalOperand = typename HalPolicy::Operand,
3728 typename HalModel = typename HalPolicy::Model>
3729bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3730{
3731 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3732 if (!input.IsValid())
3733 {
3734 return Fail("%s: Operation has invalid inputs", __func__);
3735 }
3736
3737 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3738 unsigned int rank = inputInfo.GetNumDimensions();
3739 unsigned int spatialDim = rank - 2;
3740
3741 if (rank != 4)
3742 {
3743 Fail("%s: Only inputs with rank 4 are supported", __func__);
3744 }
3745
3746 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3747 if (!output)
3748 {
3749 return Fail("%s: Could not read output 0", __func__);
3750 }
3751
3752 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3753 if (IsDynamicTensor(outputInfo))
3754 {
3755 return Fail("%s: Dynamic output tensors are not supported", __func__);
3756 }
3757
3758 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3759 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3760
3761 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3762 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3763 {
3764 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3765 }
3766
3767 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003768 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3769 {
3770 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3771 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003772 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3773 {
3774 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3775 }
3776
3777 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3778 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3779 {
3780 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3781 }
3782
3783 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3784 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003785 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3786 {
3787 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3788 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003789 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3790 {
3791 int paddingBeforeInput = paddings[i];
3792 int paddingAfterInput = paddings[i + 1];
3793 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3794 {
3795 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3796 }
3797
3798 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3799 }
3800
3801 armnn::SpaceToBatchNdDescriptor descriptor;
3802 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3803 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3804 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3805
Kevin May42477c12020-03-26 13:34:14 +00003806 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01003807 {
3808 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3809 }
3810
3811 bool isSupported = false;
3812 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3813 IsSpaceToBatchNdSupported,
3814 data.m_Backends,
3815 isSupported,
3816 inputInfo,
3817 outputInfo,
3818 descriptor);
3819 if (!isSupported)
3820 {
3821 return false;
3822 }
3823
3824 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3825 assert(layer != nullptr);
3826 input.Connect(layer->GetInputSlot(0));
3827
3828 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3829}
3830
saoste01b8471482018-10-10 09:44:51 +01003831} // namespace armnn_driver