blob: 997c9cc24b37cd07aa7f295492c2075817b5bdba [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000015#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
Kevin Mayec1e5b82020-02-26 17:00:39 +000038#ifdef ARMNN_ANDROID_R
39using OperandType = android::nn::hal::OperandType;
40#endif
41
// Aggregates the state shared across the conversion of a single HAL model:
// the candidate backends, the ArmNN network being built, and bookkeeping
// for operand-to-output-slot mapping and memory pools.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr) // INetworkPtr is a unique_ptr with a custom deleter; start empty
    {}

    // Backends to try, in preference order, when checking layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Maps a model operand index to the output slot that produces it.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Run-time memory pools backing CONSTANT_REFERENCE operands.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
54
// Lightweight handle to an ArmNN output slot that will feed a layer input,
// together with the tensor info of the data flowing through it.
// An invalid handle signals a failed operand lookup/conversion.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // Whether this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Tensor info of the data carried by the wrapped slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
72
// Holds a constant tensor extracted from the model, keeping ownership of any
// swizzled (permuted) copy of the data that had to be made.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation to apply to the data; when non-identity the data is copied
    //                 into internally-owned storage.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is disallowed because the pin may own swizzled data; moving transfers ownership.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
107
108} // namespace armnn_driver
109
110///
111/// Utility functions
112///
113
114namespace
115{
116
117using namespace armnn_driver;
118using namespace android::nn;
119
// Convenience function to log the reason for failing to convert a model.
// The format string and arguments are forwarded verbatim to ALOGD, so they
// must follow printf-style conventions.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
128
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Tries each backend in order and stops at the first one that reports support;
// 'supported' (an lvalue bool provided by the caller) receives the result.
// NOTE: InvalidArgumentException from the support check is rethrown with extra context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100172
// Builds an ArmNN TensorShape directly from a HAL operand's dimension list.
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
178
Matthew Bentham912b3622019-05-03 15:49:14 +0100179inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100180{
Matthew Bentham912b3622019-05-03 15:49:14 +0100181 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
182 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
183 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100184}
185
Mike Kellyb5fdf382019-06-11 16:35:25 +0100186#ifdef ARMNN_ANDROID_NN_V1_2
187
Keith Davis71006492020-01-06 17:44:16 +0000188// Support within the 1.2 driver for specific tensor data types
Mike Kellyb5fdf382019-06-11 16:35:25 +0100189inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
190{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000191 return type == V1_2::OperandType::BOOL ||
192 type == V1_2::OperandType::TENSOR_FLOAT16 ||
193 type == V1_2::OperandType::TENSOR_FLOAT32 ||
194 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
Keith Davis71006492020-01-06 17:44:16 +0000195 type == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000196 type == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
197 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
Mike Kellyb5fdf382019-06-11 16:35:25 +0100198 type == V1_2::OperandType::TENSOR_INT32;
199}
200
201#endif
202
// HAL 1.0 has no BOOL operand type, so this overload is always false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
207
// Overload resolution tag: a V1_0::Operand is never a 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
212
Mike Kellyb5fdf382019-06-11 16:35:25 +0100213#ifdef ARMNN_ANDROID_NN_V1_2
214
// True when the HAL 1.2 operand carries a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}
219
/// Checks if an operand is a 1.2 Operand (overload resolution tag: always true here)
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}
225
Mike Kellyb5fdf382019-06-11 16:35:25 +0100226#endif
227
// Inserts a Reshape layer after 'inputLayer' so that its output takes the
// shape described by 'reshapeInfo'.
// @param network      Network to add the layer to.
// @param inputLayer   Anything with a Connect(IInputSlot&) member (e.g. LayerInputHandle).
// @param reshapeInfo  Target tensor info; its shape becomes the reshape target.
// @return The newly added reshape layer.
template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}
245
// Connects two inputs to a binary layer, inserting a Reshape in front of the
// lower-rank input so both inputs have the same number of dimensions
// (degenerate leading dims of size 1 are prepended, NumPy-style broadcasting).
// Returns false if the required reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Right-align the smaller shape and pad the front with 1s.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
338
// Computes explicit head/tail padding for one spatial dimension from an
// implicit Android NN padding scheme (SAME/VALID), delegating the arithmetic
// to the NN runtime's calculateExplicitPadding helper.
void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // calculateExplicitPadding yields non-negative values, so the casts are safe;
    // numeric_cast would throw if that invariant were ever violated.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
352
Mike Kelly86b36d42019-07-12 16:39:33 +0100353#ifdef ARMNN_ANDROID_NN_V1_2
354
// HAL 1.2 overload: as CalcPadding above, but also accounts for kernel dilation.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
364
// Computes explicit padding for transpose convolution; unlike the forward
// convolution case the padding is derived from the *output* size and may be
// negative, hence the signed out-parameters.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}
370
Mike Kelly86b36d42019-07-12 16:39:33 +0100371#endif
372
// Converts a HAL 1.0 operand into the NN runtime's Shape struct
// (type, dimensions and quantization parameters).
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
382
Mike Kelly46272802019-08-14 17:00:48 +0100383#ifdef ARMNN_ANDROID_NN_V1_2
384
// HAL 1.2 overload of GetOperandShape; identical field mapping.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}
394
395#endif
396
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // Bias shares the quantization axis of the weights.
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Accept up to 1% relative mismatch and silently correct it; larger
            // mismatches are left untouched for ArmNN to reject downstream.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
436
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used e.g. to move a concat axis).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
// RotateTensorLeft/RotateTensorRight are inverses of each other: one is applied
// to the inputs before concatenation and the other to the output afterwards.
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100444
// Inserts a Transpose layer after 'input' applying the given dimension
// mappings, and sets the transposed tensor info on its output slot.
// @param input Anything with Connect(IInputSlot&) and GetTensorInfo() members.
// @return The newly added transpose layer.
template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
463
arovir01b0717b52018-09-05 17:03:25 +0100464bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
465 const armnn::TensorShape & outputShape,
466 uint32_t concatDim)
467{
468 // Validate the output shape is correct given the input shapes (which have just been validated)
469 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
470 if (outputShape.GetNumDimensions() != numDimensions)
471 {
472 return Fail("%s: Output shape has wrong number of dimensions", __func__);
473 }
474
475 unsigned int outputSizeAlongConcatenatedDimension = 0;
476 for (unsigned int i = 0; i < inputShapes.size(); i++)
477 {
478 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
479 }
480
481 for (unsigned int i = 0; i < numDimensions; ++i)
482 {
483 if (i == concatDim)
484 {
485 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
486 {
487 return Fail(
488 "%s: Invalid output shape for dimension %d (%d != %d)",
489 __func__,
490 i,
491 outputShape[i],
492 outputSizeAlongConcatenatedDimension);
493 }
494 }
495 else
496 {
497 if (outputShape[i] != inputShapes[0][i])
498 {
499 return Fail("%s: Invalid output shape", __func__);
500 }
501 }
502 }
503
504 return true;
505}
506
// True when the tensor has fewer than 3 dimensions and therefore must be
// reshaped before the 3-D/4-D concat handling can be applied.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
511
arovir01b0717b52018-09-05 17:03:25 +0100512void SwizzleInputs(armnn::INetwork& network,
513 std::vector<LayerInputHandle>& inputs,
514 std::vector<armnn::TensorShape>& inputShapes,
515 const armnn::PermutationVector& mapping)
516{
517 if (!mapping.IsEqual(IdentityPermutation4D))
518 {
519 size_t nInputs = inputs.size();
520 for (size_t i=0; i<nInputs; ++i)
521 {
522 // add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000523 armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
arovir01b0717b52018-09-05 17:03:25 +0100524 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
525 auto& outputInfo = outputSlot.GetTensorInfo();
526 // replace inputs with the swizzled ones
527 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
528 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
529 }
530 }
531}
532
// Verifies that every input can be transposed with the given mapping on at
// least one backend, and if so performs the swizzling via SwizzleInputs.
// A no-op (returning true) when the mapping is the 4-D identity.
// @return false as soon as one input's transpose is unsupported.
bool CheckReshapeSupported(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping,
                           const armnn::TensorInfo& outputInfo)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        // All transposes are supported: actually insert them into the network.
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
566
567
narpra01f176d5a2018-11-18 20:17:48 +0000568bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
569 int32_t & concatDimension,
570 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100571{
narpra01f176d5a2018-11-18 20:17:48 +0000572 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100573 BOOST_ASSERT(numberOfDimensions >= 3);
574
575 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000576 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
577 // or along dimension 0 or 2 for a 3-D tensor.
578 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100579 {
narpra01f176d5a2018-11-18 20:17:48 +0000580 concatDimension = 1;
581 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
582 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100583 }
narpra01f176d5a2018-11-18 20:17:48 +0000584 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100585 {
narpra01f176d5a2018-11-18 20:17:48 +0000586 concatDimension = 0;
587 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
588 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100589 }
narpra01f176d5a2018-11-18 20:17:48 +0000590 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100591}
592
593} // anonymous namespace
594
595namespace armnn_driver
596{
597
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
608
609} // namespace armnn_driver
610
611///
612/// Utility templates
613///
614
615namespace armnn_driver
616{
617
618using namespace android::nn;
619
// Looks up the operand backing the operation's input at 'inputIndex'.
// @param failOnIndexOutOfBounds When true, an out-of-range index is logged as a failure;
//                               either way nullptr is returned for an invalid index.
// @return Pointer to the operand, or nullptr when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
641
// Looks up the operand backing the operation's output at 'outputIndex'.
// @return Pointer to the operand, or nullptr (with a logged failure) when the
//         index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
661
// Resolves the read-only address of an operand's constant data, depending on
// its lifetime: inline in model.operandValues (CONSTANT_COPY) or inside one of
// the run-time memory pools (CONSTANT_REFERENCE).
// @param optional When true, a NO_VALUE operand yields nullptr without logging a failure.
// @return Start address of the data, or nullptr when unavailable.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // Mandatory operand with no value: treated as the unsupported case below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
708
// Retrieves the operand type of the operation's input at 'inputIndex'.
// @return true on success (with 'type' set), false (logged) when the input is invalid.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}
729
// True when the operand's value is fixed at model-build time.
// NOTE: NO_VALUE is deliberately included — an absent optional operand is
// treated as "constant" for the purposes of conversion.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}
742
// Converts a HAL operand holding constant data into a ConstTensorPin.
// @param dimensionMappings  Permutation applied to the data (e.g. to match ArmNN data layout).
// @param overrideTensorShape When non-null, replaces the operand's shape in the resulting tensor info.
// @param optional           When true, a missing value produces a valid "optional" pin instead of an error.
// @return A valid pin on success; an invalid pin (optionally flagged as optional) otherwise.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        // Remap the quantization axis through the same permutation applied to the data.
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
791
792template<typename HalPolicy,
793 typename HalOperation = typename HalPolicy::Operation,
794 typename HalModel = typename HalPolicy::Model>
795ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
796 uint32_t inputIndex,
797 const HalModel& model,
798 const ConversionData& data,
799 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
800 const armnn::TensorShape* overrideTensorShape = nullptr,
801 bool optional = false)
802{
803 using HalOperand = typename HalPolicy::Operand;
804
805 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
806 if (!operand)
807 {
808 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
809 return ConstTensorPin();
810 }
811 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
812 model,
813 data,
814 dimensionMappings,
815 overrideTensorShape,
816 optional);
817}
818
819template<typename HalPolicy,
820 typename OutputType,
821 typename HalOperandType = typename HalPolicy::OperandType,
822 typename HalOperation = typename HalPolicy::Operation,
823 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100824bool GetInputScalar(const HalOperation& operation,
825 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100826 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100827 OutputType& outValue,
828 const HalModel& model,
829 const ConversionData& data)
830{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100831 using HalOperand = typename HalPolicy::Operand;
832
833 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100834 if (!operand)
835 {
836 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
837 }
838
839 if (operand->type != type)
840 {
841 return Fail("%s: unexpected operand type: %s (should be %s)",
842 __func__, toString(operand->type).c_str(), toString(type).c_str());
843 }
844
845 if (operand->location.length != sizeof(OutputType))
846 {
847 return Fail("%s: incorrect operand location length: %i (should be %i)",
848 __func__, operand->location.length, sizeof(OutputType));
849 }
850
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100851 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100852 if (!valueAddress)
853 {
854 return Fail("%s: failed to get address for operand", __func__);
855 }
856
857 outValue = *(static_cast<const OutputType*>(valueAddress));
858 return true;
859}
860
// Convenience wrapper around GetInputScalar: reads input 'inputIndex' of
// 'operation' as a HAL INT32 scalar into 'outValue'.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}
872
// Convenience wrapper around GetInputScalar: reads input 'inputIndex' of
// 'operation' as a HAL FLOAT32 scalar into 'outValue'.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}
884
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100885template<typename HalPolicy,
886 typename HalOperation = typename HalPolicy::Operation,
887 typename HalOperandType = typename HalPolicy::OperandType,
888 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100889bool GetInputActivationFunctionImpl(const HalOperation& operation,
890 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100891 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100892 ActivationFn& outActivationFunction,
893 const HalModel& model,
894 const ConversionData& data)
895{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100896 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100897 {
898 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
899 __func__,
900 toString(type).c_str(),
901 toString(OperandType::INT32).c_str(),
902 toString(OperandType::TENSOR_INT32).c_str());
903 }
904
905 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100906 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100907 {
908 return Fail("%s: failed to get activation input value", __func__);
909 }
910 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
911 return true;
912}
913
// Reads a fused activation function supplied as an INT32 scalar operand.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
930
// Reads a fused activation function that the model stores inside a tensor operand.
// NOTE(review): despite the comment below, this forwards OperandType::INT32 (not
// TENSOR_INT32) to the Impl, making it behave identically to
// GetInputActivationFunction — confirm whether TENSOR_INT32 was intended.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
948
949
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100950template<typename HalPolicy,
951 typename HalOperation = typename HalPolicy::Operation,
952 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100953bool GetOptionalInputActivation(const HalOperation& operation,
954 uint32_t inputIndex,
955 ActivationFn& activationFunction,
956 const HalModel& model,
957 const ConversionData& data)
958{
959 if (operation.inputs.size() <= inputIndex)
960 {
961 activationFunction = ActivationFn::kActivationNone;
962 }
963 else
964 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100965 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100966 {
967 return Fail("%s: Operation has invalid inputs", __func__);
968 }
969 }
970 return true;
971}
972
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100973template<typename HalPolicy,
974 typename ConvolutionDescriptor,
975 typename HalOperation = typename HalPolicy::Operation,
976 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100977bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
978 uint32_t dilationXIndex,
979 ConvolutionDescriptor& descriptor,
980 const HalModel& model,
981 const ConversionData& data)
982{
983 bool success = true;
984 if (operation.inputs.size() >= dilationXIndex + 2)
985 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100986 success &= GetInputScalar<HalPolicy>(operation,
987 dilationXIndex,
988 HalPolicy::OperandType::INT32,
989 descriptor.m_DilationX,
990 model,
991 data);
992 success &= GetInputScalar<HalPolicy>(operation,
993 dilationXIndex + 1,
994 HalPolicy::OperandType::INT32,
995 descriptor.m_DilationY,
996 model,
997 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100998 }
999
1000 return success;
1001}
1002
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001003template<typename HalPolicy,
1004 typename HalOperand = typename HalPolicy::Operand,
1005 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001006bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001007 std::vector<int32_t>& outValues,
1008 const HalModel& model,
1009 const ConversionData& data)
1010{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001011 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001012 {
1013 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1014 }
1015
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001016 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001017 if (!startAddress)
1018 {
1019 return Fail("%s: failed to get operand address", __func__, operand.type);
1020 }
1021
1022 // Check number of bytes is sensible
1023 const uint32_t numBytes = operand.location.length;
1024 if (numBytes % sizeof(int32_t) != 0)
1025 {
1026 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1027 __func__, numBytes, sizeof(int32_t));
1028 }
1029
1030 outValues.resize(numBytes / sizeof(int32_t));
1031 memcpy(outValues.data(), startAddress, numBytes);
1032 return true;
1033}
1034
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001035template<typename HalPolicy,
1036 typename HalOperation = typename HalPolicy::Operation,
1037 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001038bool GetInputPaddingScheme(const HalOperation& operation,
1039 uint32_t inputIndex,
1040 PaddingScheme& outPaddingScheme,
1041 const HalModel& model,
1042 const ConversionData& data)
1043{
1044 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001045 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001046 {
1047 return Fail("%s: failed to get padding scheme input value", __func__);
1048 }
1049
1050 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1051 return true;
1052}
1053
// Resolves input 'inputIndex' of a HAL operation to a LayerInputHandle that can be
// connected to an ArmNN layer.
//
// Behaviour depends on the operand's lifetime:
//  - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT: returns a handle to the
//    output slot previously recorded for that operand (MODEL_INPUT additionally
//    checks backend support for the input tensor first).
//  - CONSTANT_COPY / CONSTANT_REFERENCE: adds an ArmNN Constant layer holding the
//    operand's data and returns a handle to its output slot.
// Returns an invalid LayerInputHandle on any failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand can throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1166
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001167template<typename HalPolicy,
1168 typename HalOperation = typename HalPolicy::Operation,
1169 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001170bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1171 uint32_t operationOutputIndex,
1172 armnn::IConnectableLayer& layer,
1173 uint32_t layerOutputIndex,
1174 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001175 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001176{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001177 using HalOperand = typename HalPolicy::Operand;
1178
1179 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001180 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1181 {
1182 return false;
1183 }
1184
1185 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1186
1187 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1188 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1189
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001190 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001191
1192 return true;
1193}
1194
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001195template<typename HalPolicy,
1196 typename HalOperation = typename HalPolicy::Operation,
1197 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001198armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1199 uint32_t inputIndex,
1200 const HalModel& model,
1201 ConversionData& data)
1202{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001203 using HalOperand = typename HalPolicy::Operand;
1204
1205 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001206 if (!operand)
1207 {
1208 return armnn::DataLayout::NHWC;
1209 }
1210
1211 if (!IsBool(*operand))
1212 {
1213 return armnn::DataLayout::NHWC;
1214 }
1215
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001216 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001217 if (!valueAddress)
1218 {
1219 return armnn::DataLayout::NHWC;
1220 }
1221
1222 if (*(static_cast<const bool*>(valueAddress)))
1223 {
1224 return armnn::DataLayout::NCHW;
1225 }
1226 else
1227 {
1228 return armnn::DataLayout::NHWC;
1229 }
1230}
1231
// Convenience overload: associates HAL output 'outputIndex' with the layer output
// slot of the same index.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data);
}
1248
// Shared implementation for the activation operations (RELU, RELU1, RELU6, TANH,
// ...): reads input 0, validates backend support for 'activationDesc', adds an
// ArmNN activation layer and registers its output slot for HAL output 0.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    // NOTE(review): this failure uses __func__ while the one above uses
    // operationName — inconsistent, but only affects log text.
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1297
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001298template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001299 typename HalOperation = typename HalPolicy::Operation,
1300 typename HalModel = typename HalPolicy::Model>
1301bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1302{
1303 armnn::ActivationDescriptor desc;
1304 desc.m_Function = armnn::ActivationFunction::ReLu;
1305
1306 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1307}
1308
1309template<typename HalPolicy,
1310 typename HalOperation = typename HalPolicy::Operation,
1311 typename HalModel = typename HalPolicy::Model>
1312bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1313{
1314 armnn::ActivationDescriptor desc;
1315 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1316 desc.m_A = 1.0f;
1317 desc.m_B = -1.0f;
1318
1319 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1320}
1321
1322template<typename HalPolicy,
1323 typename HalOperation = typename HalPolicy::Operation,
1324 typename HalModel = typename HalPolicy::Model>
1325bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1326{
1327 armnn::ActivationDescriptor desc;
1328 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1329 desc.m_A = 6.0f;
1330
1331 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1332}
1333
1334template<typename HalPolicy,
1335 typename HalOperation = typename HalPolicy::Operation,
1336 typename HalModel = typename HalPolicy::Model>
1337bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1338{
1339 armnn::ActivationDescriptor desc;
1340 desc.m_Function = armnn::ActivationFunction::TanH;
1341 desc.m_A = 1.0f; // android nn does not support tanH parameters
1342 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1343
1344 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1345}
1346
1347template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001348 typename HalOperation = typename HalPolicy::Operation,
1349 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001350bool ConvertPaddings(const HalOperation& operation,
1351 const HalModel& model,
1352 ConversionData& data,
1353 unsigned int rank,
1354 armnn::PadDescriptor& padDescriptor)
1355{
1356 using HalOperand = typename HalPolicy::Operand;
1357
1358 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1359 if (!paddingsOperand)
1360 {
1361 return Fail("%s: Could not read paddings operand", __func__);
1362 }
1363
1364 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1365 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1366 {
1367 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1368 }
1369
1370 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001371 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1372 {
1373 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1374 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001375
1376 // add padding for each dimension of input tensor.
1377 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1378 {
1379 int paddingBeforeInput = paddings[i];
1380 int paddingAfterInput = paddings[i + 1];
1381
1382 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1383 {
1384 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1385 }
1386
1387 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1388 }
1389
1390 return true;
1391}
1392
// Shared implementation for the pooling operations (AVERAGE_POOL_2D, L2_POOL_2D,
// MAX_POOL_2D). Handles both NNAPI signatures: explicit padding (>= 10 inputs:
// pad l/r/t/b, strides, filter size, activation) and implicit padding (padding
// scheme, strides, filter size, activation), plus the optional HAL 1.2 layout flag.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    // Explicit-padding signature.
    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 models may append an optional NCHW/NHWC layout flag at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Implicit padding: derive the pad values from the scheme and the input's
        // spatial dimensions (layout-aware via DataLayoutIndexed).
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1511
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001512template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001513 typename HalOperation = typename HalPolicy::Operation,
1514 typename HalModel = typename HalPolicy::Model>
1515bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001516{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001517 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001518
1519 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1520 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1521
1522 if (!input0.IsValid() || !input1.IsValid())
1523 {
1524 return Fail("%s: Operation has invalid inputs", __func__);
1525 }
1526
1527 // The FuseActivation parameter is always the input index 2
1528 // and it should be optional
1529 ActivationFn activationFunction;
1530 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1531 {
1532 return Fail("%s: Operation has invalid inputs", __func__);
1533 }
1534
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001535 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001536 if (!outputOperand)
1537 {
1538 return false;
1539 }
1540
1541 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1542 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1543
1544 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1545 if (IsDynamicTensor(outputInfo))
1546 {
1547 return Fail("%s: Dynamic output tensors are not supported", __func__);
1548 }
1549
1550 bool isSupported = false;
1551 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1552 IsAdditionSupported,
1553 data.m_Backends,
1554 isSupported,
1555 inputInfo0,
1556 inputInfo1,
1557 outputInfo);
1558 if (!isSupported)
1559 {
1560 return false;
1561 }
1562
1563 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1564 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1565
1566 if (endLayer != nullptr)
1567 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001568 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001569 if (!isReshapeSupported)
1570 {
1571 return false;
1572 }
1573
Mike Kelly46272802019-08-14 17:00:48 +01001574 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1575 }
1576 else
1577 {
1578 return Fail("%s: ProcessActivation failed", __func__);
1579 }
1580}
1581
1582template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001583 typename HalOperation = typename HalPolicy::Operation,
1584 typename HalModel = typename HalPolicy::Model>
1585bool ConvertArgMinMax(const HalOperation& operation,
1586 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001587 ConversionData& data,
1588 armnn::ArgMinMaxFunction argMinMaxFunction)
1589{
1590 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1591
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001592 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001593 using HalOperandType = typename HalPolicy::OperandType;
1594
1595 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1596
1597 if (!input0.IsValid())
1598 {
1599 return Fail("%s: Operation has invalid inputs", __func__);
1600 }
1601
1602 int32_t axis;
1603 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1604 {
1605 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1606 }
1607
1608 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1609 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1610
1611 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1612 {
1613 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1614 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1615 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1616 return Fail("%s: Axis must be in range [-n, n)", __func__);
1617 }
1618
1619 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1620 if (!output)
1621 {
1622 return Fail("%s: Could not read output 0", __func__);
1623 }
1624
1625 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1626
1627 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1628 if (IsDynamicTensor(outputInfo))
1629 {
1630 return Fail("%s: Dynamic output tensors are not supported", __func__);
1631 }
1632
1633 armnn::ArgMinMaxDescriptor descriptor;
1634 descriptor.m_Function = argMinMaxFunction;
1635 descriptor.m_Axis = axis;
1636
1637 bool isSupported = false;
1638 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1639 IsArgMinMaxSupported,
1640 data.m_Backends,
1641 isSupported,
1642 inputInfo0,
1643 outputInfo,
1644 descriptor);
1645 if (!isSupported)
1646 {
1647 return false;
1648 }
1649
1650 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1651 assert(layer != nullptr);
1652
1653 input0.Connect(layer->GetInputSlot(0));
1654
1655 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1656}
1657
/// Converts an NNAPI CONCATENATION operation into an ArmNN Concat layer.
///
/// The operation's first N inputs (0..N-1) are the tensors to join; the final
/// input is an INT32 scalar giving the concatenation axis (negative values are
/// interpreted TensorFlow-style, relative to the output rank).
///
/// The conversion may insert extra layers around the Concat:
///  - Reshape layers to expand low-rank inputs (when RequiresReshape says so),
///    with a matching Reshape after the Concat to restore the original rank;
///  - Transpose layers when the requested axis needs swizzling to one of the
///    natively supported concat dimensions (the permutation pair is produced
///    by CreateConcatPermutationParameters).
///
/// Every inserted layer is validated against the backends via
/// FORWARD_LAYER_SUPPORT_FUNC before being added. Returns true on success.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // outputInfo/outputShape are copies (not references) because they are
    // rewritten below when reshapes or permutations are applied.
    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was rank-expanded so the inverse reshape can be
    // applied after the concat. tensorDimensionsAdded records by how much; the
    // code assumes all reshaped inputs get the same expansion amount.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // The expansion reshape must itself be supported by the backends.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather pointers to the (possibly swizzled) input tensor infos for the support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Undo the swizzle applied to the inputs: transpose the concat output
        // back using the second half of the permutation pair.
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   outputInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
                                                                     layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1945
/// Converts an NNAPI CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Input 0 is the activation tensor (NHWC), input 1 the constant weights,
/// input 2 the constant bias. Two argument forms are accepted:
///  - 10 inputs: explicit padding (left/right/top/bottom), strides, activation;
///  - 7 inputs:  implicit padding scheme, strides, activation (padding is then
///    derived via CalcPadding from the input and kernel sizes).
/// Returns true on success; Fail(...) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Align the bias quantization scale with input-scale * weights-scale
    // if they disagree (requirement for quantized convolution).
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding form: inputs 3-6 are pads, 7-8 strides, 9 activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding form: input 3 is the padding scheme, 4-5 strides, 6 activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel H/W are taken from dims 1 and 2 of the weights tensor, input
        // H/W from dims 1 and 2 of the NHWC activation tensor.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer chain when activation is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2065
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002066template<typename HalPolicy,
2067 typename HalOperation = typename HalPolicy::Operation,
2068 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002069bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2070{
2071 using HalOperand = typename HalPolicy::Operand;
2072 using HalOperandType = typename HalPolicy::OperandType;
2073
2074 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2075 if (!input.IsValid() )
2076 {
2077 return Fail("%s: Operation has invalid inputs", __func__);
2078 }
2079
2080 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2081 unsigned int rank = inputInfo.GetNumDimensions();
2082 if (rank != 4)
2083 {
2084 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2085 }
2086
2087 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2088 if (!output)
2089 {
2090 return Fail("%s: Could not read output 0", __func__);
2091 }
2092
2093 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2094 if (IsDynamicTensor(outputInfo))
2095 {
2096 return Fail("%s: Dynamic output tensors are not supported", __func__);
2097 }
2098
2099 armnn::DepthToSpaceDescriptor descriptor;
2100
2101 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2102 if (descriptor.m_BlockSize <= 1)
2103 {
2104 return Fail("%s: Block size must be at least 1 in all dimensions");
2105 }
2106
2107 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2108 if (Is12Operand(*output))
2109 {
2110 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2111 }
2112
2113 bool isSupported = false;
2114 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2115 IsDepthToSpaceSupported,
2116 data.m_Backends,
2117 isSupported,
2118 inputInfo,
2119 outputInfo,
2120 descriptor);
2121 if (!isSupported)
2122 {
2123 return false;
2124 }
2125
2126 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2127 assert(layer != nullptr);
2128 input.Connect(layer->GetInputSlot(0));
2129
2130 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2131}
2132
/// Converts an NNAPI DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Input 0 is the NHWC activation tensor, input 1 the constant weights
/// (AndroidNN shape [ 1, H, W, I * M ]), input 2 the constant bias. Two
/// argument forms are accepted:
///  - 11 inputs: explicit padding (left/right/top/bottom), strides,
///    depth multiplier (input 9, implied by the weight reshape), activation;
///  - 8 inputs:  implicit padding scheme, strides, depth multiplier,
///    activation (padding derived via CalcPadding).
/// The weights are re-laid-out from [ H, W, I, M ] to ArmNN's expected
/// [ M, I, H, W ] before being pinned. Returns true on success.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I comes from the input tensor's channel dim; M = (I * M) / I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input-scale * weights-scale if
    // they disagree (requirement for quantized convolution).
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding form: inputs 3-6 are pads, 7-8 strides, 10 activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding form: input 3 is the padding scheme, 4-5 strides, 7 activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [ M, I, H, W ] after the swizzle above, so kernel
        // W/H come from dims 3 and 2; input W/H from dims 2 and 1 (NHWC).
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer chain when activation is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2277
Mike Kelly3c673942019-07-25 09:26:06 +01002278template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002279 typename HalOperation = typename HalPolicy::Operation,
2280 typename HalModel = typename HalPolicy::Model>
2281bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002282{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002283 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002284
2285 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2286 if (!input.IsValid())
2287 {
2288 return Fail("%s: Operation has invalid input", __func__);
2289 }
2290
Sadik Armagan98c0f662019-11-21 15:54:36 +00002291 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2292 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2293 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2294 {
2295 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2296 }
2297
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002298 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002299 if (!outputOperand)
2300 {
2301 return Fail("%s: Operation has invalid outputs", __func__);
2302 }
2303
2304 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2305 if (IsDynamicTensor(outputInfo))
2306 {
2307 return Fail("%s: Dynamic output tensors are not supported", __func__);
2308 }
2309
2310 bool isSupported = false;
2311 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2312 IsDequantizeSupported,
2313 data.m_Backends,
2314 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002315 inputInfo,
2316 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002317 if (!isSupported)
2318 {
2319 return false;
2320 }
2321
2322 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2323 assert(layer != nullptr);
2324 input.Connect(layer->GetInputSlot(0));
2325
2326 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2327}
2328
// Converts an Android NN DIV operation into an ArmNN Division layer.
// Inputs: 0 = numerator tensor, 1 = denominator tensor, 2 = fused activation function.
// Returns false (via Fail) on invalid inputs, unsupported backend, or dynamic output.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDivisionSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    // ProcessActivation may append an activation layer; endLayer is the last layer of the chain.
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        // Insert reshape layers if the two inputs need broadcasting to a common shape.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    return Fail("%s: ProcessActivation failed", __func__);
}
2392
2393template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002394 typename HalOperation = typename HalPolicy::Operation,
2395 typename HalModel = typename HalPolicy::Model>
2396bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002397{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002398 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002399
2400 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2401 if (!input.IsValid())
2402 {
2403 return Fail("%s: Operation has invalid inputs", __func__);
2404 }
2405
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002406 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002407 if (!outputOperand)
2408 {
2409 return Fail("%s: Operation has invalid outputs", __func__);
2410 }
2411
2412 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2413 if (IsDynamicTensor(outputInfo))
2414 {
2415 return Fail("%s: Dynamic output tensors are not supported", __func__);
2416 }
2417
2418 bool isSupported = false;
2419 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2420 IsFloorSupported,
2421 data.m_Backends,
2422 isSupported,
2423 input.GetTensorInfo(),
2424 outputInfo);
2425 if (!isSupported)
2426 {
2427 return false;
2428 }
2429
2430 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2431 assert(layer != nullptr);
2432 input.Connect(layer->GetInputSlot(0));
2433
2434 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2435}
2436
// V1.0 HAL has no TENSOR_QUANT8_SYMM operand type, so no V1.0 operand can be QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2441
#ifdef ARMNN_ANDROID_NN_V1_2

// Overload for V1.2 operands: QSYMM8 was introduced in the 1.2 HAL.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2450
// Outcome of DequantizeIfRequired (below).
enum class DequantizeStatus
{
    SUCCESS,         // weights were found behind a DEQUANTIZE op and have been dequantized
    NOT_REQUIRED,    // weights are already constant (or no matching DEQUANTIZE producer found)
    INVALID_OPERAND  // the requested input operand could not be read
};

// (buffer of dequantized floats, buffer size in bytes, tensor info for the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2459
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002460template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002461 typename HalOperation = typename HalPolicy::Operation,
2462 typename HalModel = typename HalPolicy::Model>
2463DequantizeResult DequantizeIfRequired(size_t operand_index,
2464 const HalOperation& operation,
2465 const HalModel& model,
2466 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002467{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002468 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002469
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002470 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002471 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002472 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002473 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002474 }
2475
2476 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2477 {
2478 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002479 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002480 }
2481
2482 const size_t weightsInputIndex = operation.inputs[operand_index];
2483
2484 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2485 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2486 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2487 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002488 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002489 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002490 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2491 {
2492 continue;
2493 }
2494
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002495 size_t outOpIndex = weightsInputIndex + 1;
2496 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002497 {
2498 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002499 }
2500
2501 if (outOpIndex != weightsInputIndex)
2502 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002503 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002504 }
2505
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002506 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002507 BOOST_ASSERT(operand);
2508
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002509 if (!IsQSymm8(*operand))
2510 {
2511 // Only supporting dequantize from QSYMM8 to FLOAT
2512 break;
2513 }
2514
2515 // Allocate a new buffer for the dequantized data and manually dequantize
2516 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2517 if (!startValue)
2518 {
2519 // Failed to get the operand address
2520 break;
2521 }
2522
2523 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2524 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002525 const float quantizationScale = operand->scale;
2526
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002527 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2528 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2529 {
2530 float* dstPtr = dequantizedBuffer.get();
2531 BOOST_ASSERT(dstPtr);
2532 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2533 }
2534
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002535 // Construct tensor info for dequantized ConstTensor
2536 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2537 operand->dimensions.data(),
2538 armnn::DataType::Float32);
2539
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002540 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2541 std::move(tensorInfo),
2542 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002543 }
2544
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002545 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002546}
2547
// Produces a ConstTensorPin for the input operand at operandIndex, dequantizing it first
// via DequantizeIfRequired when the weights are fed by a DEQUANTIZE op rather than being
// constant. Returns an invalid pin if the operand cannot be read.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            // Weights are already constant; read them directly from the model.
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            // Wrap the freshly dequantized float buffer (tuple: info, data ptr, byte size).
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}
2580
2581
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = input tensor, 1 = weights (possibly behind a DEQUANTIZE), 2 = 1D bias,
// 3 = fused activation function. Inputs with rank > 2 are flattened via an inserted
// Reshape layer before the FullyConnected layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be produced by a DEQUANTIZE op; dequantize them to constants if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to 2D as required by FullyConnected; throws on mismatch.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Verify the flattened input/weights shapes actually yield the declared output shape.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape layer to flatten rank > 2 inputs before the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2700
// Converts an Android NN L2_NORMALIZATION operation into an ArmNN L2Normalization layer.
// Only the single-input form (no optional axis input) and rank-4 NHWC outputs are supported.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    // The optional axis input (introduced in later HAL versions) is not handled here.
    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2759
// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel, LocalBrightness method, NHWC).
// Inputs: 0 = tensor, 1 = radius (INT32), 2 = bias k, 3 = alpha, 4 = beta.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // The optional axis input (introduced in later HAL versions) is not handled here.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius, k, alpha and beta scalars from inputs 1-4.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2837
2838template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002839 typename HalOperation = typename HalPolicy::Operation,
2840 typename HalModel = typename HalPolicy::Model>
2841bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002842{
Mike Kelly46272802019-08-14 17:00:48 +01002843 armnn::ActivationDescriptor desc;
2844 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2845
2846 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2847}
2848
// Converts an Android NN MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor, 1 = axis tensor (INT32 values, may be negative), 2 = keep-dims flag.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axis values into [0, rank).
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2925
2926template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002927 typename HalOperation = typename HalPolicy::Operation,
2928 typename HalModel = typename HalPolicy::Model>
2929bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002930{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002931 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002932
2933 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2934 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2935
2936 if (!input0.IsValid() || !input1.IsValid())
2937 {
2938 return Fail("%s: Operation has invalid inputs", __func__);
2939 }
2940
2941 // The FuseActivation parameter is always the input index 2
2942 // and it should be optional
2943 ActivationFn activationFunction;
2944 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2945 {
2946 return Fail("%s: Operation has invalid inputs", __func__);
2947 }
2948
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002949 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002950
2951 if (outputOperand == nullptr)
2952 {
2953 return false;
2954 }
2955
2956 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2957 if (IsDynamicTensor(outputInfo))
2958 {
2959 return Fail("%s: Dynamic output tensors are not supported", __func__);
2960 }
2961
2962 bool isSupported = false;
2963 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2964 IsMultiplicationSupported,
2965 data.m_Backends,
2966 isSupported,
2967 input0.GetTensorInfo(),
2968 input1.GetTensorInfo(),
2969 outputInfo);
2970 if (!isSupported)
2971 {
2972 return false;
2973 }
2974
2975 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2976 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2977
2978 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2979 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2980
2981 if (endLayer != nullptr)
2982 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002983 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002984 if (!isReshapeSupported)
2985 {
2986 return false;
2987 }
2988
Mike Kelly46272802019-08-14 17:00:48 +01002989 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2990 }
2991 else
2992 {
2993 return Fail("%s: ProcessActivation failed", __func__);
2994 }
2995}
2996
2997template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002998 typename HalOperation = typename HalPolicy::Operation,
2999 typename HalModel = typename HalPolicy::Model>
// Converts an Android NN PAD operation into an ArmNN Pad layer.
// Reads the input tensor (operand 0) and the paddings operand, validates backend
// support, then adds and wires up the Pad layer. Returns true on success; on
// failure logs via Fail() and returns false.
bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // ConvertPaddings reads the paddings operand (one {before, after} pair per
    // dimension) and fills descriptor.m_PadList.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    // Output shape must be fully specified at model-conversion time.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Query every configured backend for support before committing the layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3059
Mike Kelly0a879362019-07-29 16:56:31 +01003060template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003061 typename HalOperation = typename HalPolicy::Operation,
3062 typename HalModel = typename HalPolicy::Model>
3063bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003064{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003065 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003066
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003067 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3068 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3069 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003070
3071 if (inputOperand == nullptr
3072 || requestedShapeOperand == nullptr
3073 || outputOperand == nullptr)
3074 {
3075 return Fail("%s: Operation has invalid inputs", __func__);
3076 }
3077
3078 if (requestedShapeOperand->dimensions.size() != 1)
3079 {
3080 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3081 __func__, requestedShapeOperand->dimensions.size());
3082 }
3083
3084 std::vector<int32_t> targetDimensions;
3085 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3086 {
3087 return Fail("%s: Could not read values of input 1", __func__);
3088 }
3089
3090 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3091
3092 Shape requestedShape;
3093 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3094 // function that resolves these values into a fully specified tensor shape.
3095 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3096 {
3097 return Fail("%s: Failed to resolve the requested shape", __func__);
3098 }
3099
3100 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3101 if (!SameShape(requestedShape, outputOperandShape))
3102 {
3103 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3104 }
3105
3106 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3107 if (!input.IsValid())
3108 {
3109 return Fail("%s: Could not read input 0", __func__);
3110 }
3111
3112 armnn::ReshapeDescriptor reshapeDescriptor;
3113 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3114 requestedShape.dimensions.data());
3115
3116 bool isSupported = false;
3117 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3118 IsReshapeSupported,
3119 data.m_Backends,
3120 isSupported,
3121 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003122 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003123 reshapeDescriptor);
3124 if (!isSupported)
3125 {
3126 return false;
3127 }
3128
3129 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3130 assert(layer != nullptr);
3131 input.Connect(layer->GetInputSlot(0));
3132
3133 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3134}
3135
3136template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003137 typename HalOperation = typename HalPolicy::Operation,
3138 typename HalModel = typename HalPolicy::Model>
3139bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003140{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003141 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003142
Mike Kelly0a879362019-07-29 16:56:31 +01003143 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3144 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3145
3146 if (!input0.IsValid() || !input1.IsValid())
3147 {
3148 return Fail("%s: Operation has invalid inputs", __func__);
3149 }
3150
3151 // The FuseActivation parameter is always the input index 2
3152 // and it should be optional
3153 ActivationFn activationFunction;
3154 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3155 {
3156 return Fail("%s: Operation has invalid inputs", __func__);
3157 }
3158
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003159 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003160 if (!output)
3161 {
3162 return Fail("%s: Could not read output 0", __func__);
3163 }
3164
3165 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3166 if (IsDynamicTensor(outputInfo))
3167 {
3168 return Fail("%s: Dynamic output tensors are not supported", __func__);
3169 }
3170
3171 bool isSupported = false;
3172 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3173 IsSubtractionSupported,
3174 data.m_Backends,
3175 isSupported,
3176 input0.GetTensorInfo(),
3177 input1.GetTensorInfo(),
3178 outputInfo);
3179 if (!isSupported)
3180 {
3181 return false;
3182 }
3183
3184 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3185 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3186
3187 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3188 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3189
3190 if (endLayer)
3191 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003192 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003193 if (!isReshapeSupported)
3194 {
3195 return false;
3196 }
Mike Kelly0a879362019-07-29 16:56:31 +01003197 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3198 }
3199
3200 return Fail("%s: ProcessActivation failed", __func__);
3201}
3202
Finn Williams23b87b32019-07-30 11:44:05 +01003203template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003204 typename HalOperation = typename HalPolicy::Operation,
3205 typename HalModel = typename HalPolicy::Model>
3206bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003207{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003208 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003209
3210 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3211 if (!input.IsValid())
3212 {
3213 return Fail("%s: Operation has invalid inputs", __func__);
3214 }
3215
3216 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3217 unsigned int rank = inputInfo.GetNumDimensions();
3218 if (rank > 4)
3219 {
3220 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3221 }
3222
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003223 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003224 if (!output)
3225 {
3226 return Fail("%s: Could not read output 0", __func__);
3227 }
3228
3229 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3230 {
3231 return Fail("%s: Dynamic output tensors are not supported", __func__);
3232 }
3233
3234 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3235 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003236 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003237
3238 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3239
3240 std::vector<int32_t> axis;
3241 if (!axisOperand)
3242 {
3243 axis.assign(dimensionSequence,
3244 dimensionSequence + rank);
3245 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003246 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003247 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003248 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003249 }
3250
3251 std::vector<uint32_t> outputDims;
3252 for (unsigned int i = 0; i < rank; i++)
3253 {
3254 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3255 auto currentDimension = inputInfo.GetShape()[i];
3256 if (skipSqueeze || currentDimension != 1)
3257 {
3258 outputDims.push_back(currentDimension);
3259 }
3260 }
3261
3262 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3263
3264 armnn::TensorInfo outputInfo = inputInfo;
3265 outputInfo.SetShape(outShape);
3266
3267 armnn::ReshapeDescriptor reshapeDesc;
3268 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3269
3270 bool isSupported = false;
3271 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3272 IsReshapeSupported,
3273 data.m_Backends,
3274 isSupported,
3275 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003276 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003277 reshapeDesc);
3278 if (!isSupported)
3279 {
3280 return false;
3281 }
3282
3283 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3284 assert(layer != nullptr);
3285 input.Connect(layer->GetInputSlot(0));
3286
3287 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3288}
3289
3290template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003291 typename HalOperation = typename HalPolicy::Operation,
3292 typename HalModel = typename HalPolicy::Model>
3293bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003294{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003295 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003296
3297 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3298 if (!input.IsValid())
3299 {
3300 return Fail("%s: Operation has invalid inputs", __func__);
3301 }
3302
3303 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3304 unsigned int rank = inputInfo.GetNumDimensions();
3305 if (rank > 4)
3306 {
3307 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3308 }
3309
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003310 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003311 if (!output)
3312 {
3313 return Fail("%s: Could not read output 0", __func__);
3314 }
3315
3316 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3317 if (IsDynamicTensor(outputInfo))
3318 {
3319 return Fail("%s: Dynamic output tensors are not supported", __func__);
3320 }
3321
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003322 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3323 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3324 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003325
3326 std::vector<int32_t> beginValues;
3327 std::vector<int32_t> endValues;
3328 std::vector<int32_t> stridesValues;
3329
3330 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003331 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003332 {
3333 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3334 {
3335 return false;
3336 }
3337
3338 if (operandValues.size() != rank)
3339 {
3340 return false;
3341 }
3342
3343 return true;
3344 };
3345
3346 if (!ValidateInputOperands(*beginOperand, beginValues)
3347 || !ValidateInputOperands(*endOperand, endValues)
3348 || !ValidateInputOperands(*stridesOperand, stridesValues))
3349 {
3350 return Fail("%s: Operation has invalid input operand", __func__);
3351 }
3352
3353 // Stride cannot have value '0'
3354 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3355 {
3356 return Fail("%s: Stride must be non-zero value.", __func__);
3357 }
3358
3359 armnn::StridedSliceDescriptor descriptor;
3360 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3361 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3362 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3363 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3364
3365 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3366 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3367 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3368 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3369 {
3370 return Fail("%s: Operation has invalid inputs", __func__);
3371 }
3372
3373 bool isSupported = false;
3374 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3375 IsStridedSliceSupported,
3376 data.m_Backends,
3377 isSupported,
3378 inputInfo,
3379 outputInfo,
3380 descriptor);
3381 if (!isSupported)
3382 {
3383 return false;
3384 }
3385
3386 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3387 assert(layer != nullptr);
3388 input.Connect(layer->GetInputSlot(0));
3389
3390 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3391}
3392
3393template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003394 typename HalOperation = typename HalPolicy::Operation,
3395 typename HalModel = typename HalPolicy::Model>
3396bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003397{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003398 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003399
3400 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3401 if (!input.IsValid())
3402 {
3403 return Fail("%s: Operation has invalid inputs", __func__);
3404 }
3405
3406 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3407 unsigned int rank = inputInfo.GetNumDimensions();
3408 if (rank > 4)
3409 {
3410 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3411 }
3412
3413 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3414 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003415 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003416
3417 std::vector<int32_t> perm(rank);
3418 if (!permOperand)
3419 {
3420 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3421 for (unsigned int i = rank; i > 0; i--)
3422 {
3423 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3424 }
3425 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003426 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003427 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003428 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003429 }
3430
3431 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3432
Mike Kelly4a956582020-02-28 10:32:09 +00003433 armnn::TransposeDescriptor transposeDesc;
3434 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003435
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003436 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003437 if (!output)
3438 {
3439 return Fail("%s: Could not read output 0", __func__);
3440 }
3441
3442 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003443 if (IsDynamicTensor(outputInfo))
3444 {
3445 return Fail("%s: Dynamic output tensors are not supported", __func__);
3446 }
3447
Mike Kelly46272802019-08-14 17:00:48 +01003448
3449 bool isSupported = false;
3450 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003451 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003452 data.m_Backends,
3453 isSupported,
3454 inputInfo,
3455 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003456 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003457 if (!isSupported)
3458 {
3459 return false;
3460 }
3461
Mike Kelly4a956582020-02-28 10:32:09 +00003462 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003463 assert(layer != nullptr);
3464 input.Connect(layer->GetInputSlot(0));
3465
3466 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3467}
3468
3469template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003470 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003471 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003472 typename HalModel = typename HalPolicy::Model>
3473bool ConvertBatchToSpaceNd(const HalOperation& operation,
3474 const HalModel& model,
3475 ConversionData& data)
3476{
Finn Williams23b87b32019-07-30 11:44:05 +01003477
3478 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3479 if (!input.IsValid())
3480 {
3481 return Fail("%s: Operation has invalid inputs", __func__);
3482 }
3483
3484 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3485 if (!output)
3486 {
3487 return Fail("%s: Could not read output 0", __func__);
3488 }
3489
3490 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3491 if (IsDynamicTensor(outputInfo))
3492 {
3493 return Fail("%s: Dynamic output tensors are not supported", __func__);
3494 }
3495
3496 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3497 if (!blockOperand)
3498 {
3499 return Fail("%s: Could not read input 1", __func__);
3500 }
3501
3502 // Convert the block operand to int32
3503 std::vector<int32_t> block;
3504 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3505 {
3506 return Fail("%s: Input 1 has invalid values", __func__);
3507 }
3508
3509 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3510
3511 unsigned int rank = inputInfo.GetNumDimensions();
3512 if (rank != 4)
3513 {
3514 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3515 }
3516
3517 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3518 {
3519 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3520 " greater than or equal to 1", __func__);
3521 }
3522
3523 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3524 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3525 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3526
3527 if (Is12Operand(*output))
3528 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003529 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003530 }
3531 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3532 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3533
3534 bool isSupported = false;
3535 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3536 IsBatchToSpaceNdSupported,
3537 data.m_Backends,
3538 isSupported,
3539 inputInfo,
3540 outputInfo,
3541 batchToSpaceNdDesc);
3542 if (!isSupported)
3543 {
3544 return false;
3545 }
3546
3547 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3548 assert(layer != nullptr);
3549 input.Connect(layer->GetInputSlot(0));
3550
3551 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3552}
Mike Kelly0a879362019-07-29 16:56:31 +01003553
Finn Williamsd74c5052019-07-30 17:06:00 +01003554template<typename HalPolicy,
3555 typename HalOperation = typename HalPolicy::Operation,
3556 typename HalOperand = typename HalPolicy::Operand,
3557 typename HalModel = typename HalPolicy::Model>
3558bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3559{
3560 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3561 if (!input.IsValid())
3562 {
3563 return Fail("%s: Operation has invalid inputs", __func__);
3564 }
3565
3566 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3567 unsigned int rank = inputInfo.GetNumDimensions();
3568 unsigned int spatialDim = rank - 2;
3569
3570 if (rank != 4)
3571 {
3572 Fail("%s: Only inputs with rank 4 are supported", __func__);
3573 }
3574
3575 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3576 if (!output)
3577 {
3578 return Fail("%s: Could not read output 0", __func__);
3579 }
3580
3581 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3582 if (IsDynamicTensor(outputInfo))
3583 {
3584 return Fail("%s: Dynamic output tensors are not supported", __func__);
3585 }
3586
3587 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3588 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3589
3590 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3591 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3592 {
3593 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3594 }
3595
3596 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003597 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3598 {
3599 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3600 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003601 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3602 {
3603 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3604 }
3605
3606 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3607 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3608 {
3609 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3610 }
3611
3612 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3613 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003614 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3615 {
3616 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3617 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003618 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3619 {
3620 int paddingBeforeInput = paddings[i];
3621 int paddingAfterInput = paddings[i + 1];
3622 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3623 {
3624 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3625 }
3626
3627 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3628 }
3629
3630 armnn::SpaceToBatchNdDescriptor descriptor;
3631 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3632 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3633 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3634
3635 if (Is12Operand(*output))
3636 {
3637 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3638 }
3639
3640 bool isSupported = false;
3641 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3642 IsSpaceToBatchNdSupported,
3643 data.m_Backends,
3644 isSupported,
3645 inputInfo,
3646 outputInfo,
3647 descriptor);
3648 if (!isSupported)
3649 {
3650 return false;
3651 }
3652
3653 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3654 assert(layer != nullptr);
3655 input.Connect(layer->GetInputSlot(0));
3656
3657 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3658}
3659
saoste01b8471482018-10-10 09:44:51 +01003660} // namespace armnn_driver