blob: cfbef5a8be736d79656f899ca2aa9f94634219de [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Carries the state shared across one model conversion: the ordered list of
// candidate backends, the armnn network under construction, the output slot
// produced for each operand, and the memory pools backing operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)  // empty INetworkPtr: (pointer, deleter)
    {}

    const std::vector<armnn::BackendId> m_Backends;            // backends to try, in preference order
    armnn::INetworkPtr m_Network;                              // network being built
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;   // indexed by operand index
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;      // pools referenced by CONSTANT_REFERENCE operands
};
50
// Pairs an armnn::IOutputSlot (the producer of an operand's data) with its
// TensorInfo, plus a validity flag for operands that could not be resolved.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle wraps a usable output slot.
    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;   // non-owning; slot producing this operand's data
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds a model constant as an armnn::ConstTensor, optionally owning a
// swizzled (permuted) copy of the underlying data.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data (identity leaves the pool data referenced as-is).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (may own a swizzled data buffer); movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log (at debug level) the reason for failing to convert a model.
// The format string and arguments are forwarded verbatim to ALOGD (printf-style).
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// The expansion is wrapped in do { } while (false) so that it behaves as a single
// statement (safe under un-braced if/else) and so that its local
// 'reasonIfUnsupported' does not clash when the macro is used twice in one scope.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
do { \
    std::string reasonIfUnsupported; \
    try { \
        for (auto&& backendId : backends) \
        { \
            auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
            if (layerSupportObject) \
            { \
                supported = \
                    layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
                if (supported) \
                { \
                    break; \
                } \
                else \
                { \
                    if (reasonIfUnsupported.size() > 0) \
                    { \
                        ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                    } \
                    else \
                    { \
                        ALOGD("%s: not supported by armnn", funcName); \
                    } \
                } \
            } \
            else \
            { \
                ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
            } \
        } \
        if (!supported) \
        { \
            ALOGD("%s: not supported by any specified backend", funcName); \
        } \
    } catch (const armnn::InvalidArgumentException &e) { \
        throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
} while (false)
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100165
Mike Kellyb5fdf382019-06-11 16:35:25 +0100166template<typename Operand>
167armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100168{
169 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
170}
171
Matthew Bentham912b3622019-05-03 15:49:14 +0100172inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100173{
Matthew Bentham912b3622019-05-03 15:49:14 +0100174 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
175 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
176 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100177}
178
#ifdef ARMNN_ANDROID_NN_V1_2

// Tells whether the given HAL 1.2 operand type can be represented as an ArmNN tensor.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
192
// HAL 1.0 has no BOOL operand type, so a 1.0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
197
// Checks if an operand is a HAL 1.2 Operand; this overload handles 1.0 operands.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
202
#ifdef ARMNN_ANDROID_NN_V1_2

// A HAL 1.2 operand is a bool iff its declared type is BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a HAL 1.2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
217
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100218template<typename LayerHandleType>
219armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
220 armnn::TensorInfo reshapeInfo)
221{
222 armnn::ReshapeDescriptor reshapeDescriptor;
223 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
224
225 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
226 BOOST_ASSERT(reshapeLayer != nullptr);
227
228 // Attach the input layer to the reshape layer
229 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
230 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
231
232 return *reshapeLayer;
233}
234
// Connects two inputs to the (two-input) 'startLayer', broadcasting ranks if
// needed: when the inputs have different numbers of dimensions, the smaller
// one is reshaped with leading degenerate (size-1) dimensions so both inputs
// have equal rank. The left/right input order is preserved.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Build the broadcast shape: ones in the leading positions, then the
    // original dimensions of the smaller tensor.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0  Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0  Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }
}
305
306void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
307 android::nn::PaddingScheme scheme)
308{
309 int32_t padHead;
310 int32_t padTail;
311 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
312 outPadHead = boost::numeric_cast<uint32_t>(padHead);
313 outPadTail = boost::numeric_cast<uint32_t>(padTail);
314}
315
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 overload: converts an implicit padding scheme into explicit
// head/tail padding, taking the kernel dilation into account.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Computes explicit padding for a transpose convolution from the desired
// output size, kernel and stride. Padding may be negative, hence int32_t outputs.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
335
Matthew Bentham912b3622019-05-03 15:49:14 +0100336Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100337{
338 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100339 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100340 shape.dimensions = operand.dimensions;
341 shape.scale = operand.scale;
342 shape.offset = operand.zeroPoint;
343 return shape;
344}
345
#ifdef ARMNN_ANDROID_NN_V1_2

// Translates a HAL 1.2 operand into the NN runtime's Shape structure.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
359
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Only snap the scale when it is within 1% of the expected value;
        // bigger mismatches are left untouched (and will be rejected later).
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
378
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U }); // exchanges dimensions 1 and 2

// 3D Permutation Vectors
// RotateTensorLeft/RotateTensorRight are used as a matched pair by
// CreateConcatPermutationParameters (permute in, then back out).
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
389
390template<typename OSlot>
391armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
392 const armnn::PermutationVector& mappings)
393{
394 // Add swizzle layer
395 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
396
397 BOOST_ASSERT(layer != nullptr);
398
399 // Connect input to swizzle layer
400 input.Connect(layer->GetInputSlot(0));
401
402 // Setup swizzled output
403 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
404 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
405
406 return *layer;
407}
408
409void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
410{
411 // Add swizzle layer
412 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
413 // Connect swizzled input to layer
414 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
415}
416
417armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
418{
419 // Add deswizzle layer
420 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
421 return deswizzleLayer;
422}
423
// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly.
// Swizzles (NHWC -> ArmNN) the input into firstLayer and appends a deswizzle
// (ArmNN -> NHWC) after lastLayer; returns the deswizzle layer.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
433
// Only suitable for input/output slot index 0; convenience overload for a
// single layer that is both first and last of the swizzled sequence.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
440
441bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
442 const armnn::TensorShape & outputShape,
443 uint32_t concatDim)
444{
445 // Validate the output shape is correct given the input shapes (which have just been validated)
446 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
447 if (outputShape.GetNumDimensions() != numDimensions)
448 {
449 return Fail("%s: Output shape has wrong number of dimensions", __func__);
450 }
451
452 unsigned int outputSizeAlongConcatenatedDimension = 0;
453 for (unsigned int i = 0; i < inputShapes.size(); i++)
454 {
455 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
456 }
457
458 for (unsigned int i = 0; i < numDimensions; ++i)
459 {
460 if (i == concatDim)
461 {
462 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
463 {
464 return Fail(
465 "%s: Invalid output shape for dimension %d (%d != %d)",
466 __func__,
467 i,
468 outputShape[i],
469 outputSizeAlongConcatenatedDimension);
470 }
471 }
472 else
473 {
474 if (outputShape[i] != inputShapes[0][i])
475 {
476 return Fail("%s: Invalid output shape", __func__);
477 }
478 }
479 }
480
481 return true;
482}
483
484bool RequiresReshape(armnn::TensorShape & inputShape)
485{
486 return inputShape.GetNumDimensions() < 3;
487}
488
// Applies 'mapping' to every input by inserting permute layers, then replaces
// the entries of both 'inputs' and 'inputShapes' with the swizzled versions.
// A 4D identity mapping makes this a no-op.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
509
narpra01f176d5a2018-11-18 20:17:48 +0000510bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
511 int32_t & concatDimension,
512 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100513{
narpra01f176d5a2018-11-18 20:17:48 +0000514 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100515 BOOST_ASSERT(numberOfDimensions >= 3);
516
517 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000518 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
519 // or along dimension 0 or 2 for a 3-D tensor.
520 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100521 {
narpra01f176d5a2018-11-18 20:17:48 +0000522 concatDimension = 1;
523 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
524 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100525 }
narpra01f176d5a2018-11-18 20:17:48 +0000526 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100527 {
narpra01f176d5a2018-11-18 20:17:48 +0000528 concatDimension = 0;
529 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
530 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100531 }
narpra01f176d5a2018-11-18 20:17:48 +0000532 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100533}
534
535} // anonymous namespace
536
537namespace armnn_driver
538{
539
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
550
551} // namespace armnn_driver
552
553///
554/// Utility templates
555///
556
557namespace armnn_driver
558{
559
560using namespace android::nn;
561
// Looks up the operand feeding input 'inputIndex' of 'operation'.
// Returns nullptr when the index is out of range; logging of that case can be
// suppressed via failOnIndexOutOfBounds (used by callers probing for optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu: inputIndex is uint32_t, size() is size_t (passing size_t to %i is undefined).
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
583
// Looks up the operand receiving output 'outputIndex' of 'operation'.
// Returns nullptr (after logging) when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu: outputIndex is uint32_t, size() is size_t (passing size_t to %i is undefined).
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
603
// Resolves the read-only address of an operand's data, depending on its lifetime:
// CONSTANT_COPY data lives in model.operandValues, CONSTANT_REFERENCE data lives
// in one of the mapped memory pools. Returns nullptr for NO_VALUE (silently when
// 'optional' is set) and for any other lifetime (with a logged failure).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with NO_VALUE is handled as invalid below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
650
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100651template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100652 typename HalOperation = typename HalPolicy::Operation,
653 typename HalModel = typename HalPolicy::Model,
654 typename HalOperandType = typename HalPolicy::OperandType>
655bool GetOperandType(const HalOperation& operation,
656 uint32_t inputIndex,
657 const HalModel& model,
658 HalOperandType& type)
659{
660 using HalOperand = typename HalPolicy::Operand;
661
662 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
663 if (!operand)
664 {
665 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
666 }
667
668 type = operand->type;
669 return true;
670}
671
// Converts a HAL operand holding constant data into a ConstTensorPin.
// Returns an invalid pin (after logging) on failure; when 'optional' is set, a
// NO_VALUE operand yields an invalid-but-optional pin instead of an error.
// @param dimensionMappings   permutation applied to the constant data (default: none)
// @param overrideTensorShape when non-null, replaces the shape derived from the operand
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Unless optional, the constant must live in the model (inline copy or referenced pool).
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
719
720template<typename HalPolicy,
721 typename HalOperation = typename HalPolicy::Operation,
722 typename HalModel = typename HalPolicy::Model>
723ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
724 uint32_t inputIndex,
725 const HalModel& model,
726 const ConversionData& data,
727 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
728 const armnn::TensorShape* overrideTensorShape = nullptr,
729 bool optional = false)
730{
731 using HalOperand = typename HalPolicy::Operand;
732
733 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
734 if (!operand)
735 {
736 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
737 return ConstTensorPin();
738 }
739 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
740 model,
741 data,
742 dimensionMappings,
743 overrideTensorShape,
744 optional);
745}
746
747template<typename HalPolicy,
748 typename OutputType,
749 typename HalOperandType = typename HalPolicy::OperandType,
750 typename HalOperation = typename HalPolicy::Operation,
751 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100752bool GetInputScalar(const HalOperation& operation,
753 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100754 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100755 OutputType& outValue,
756 const HalModel& model,
757 const ConversionData& data)
758{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100759 using HalOperand = typename HalPolicy::Operand;
760
761 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100762 if (!operand)
763 {
764 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
765 }
766
767 if (operand->type != type)
768 {
769 return Fail("%s: unexpected operand type: %s (should be %s)",
770 __func__, toString(operand->type).c_str(), toString(type).c_str());
771 }
772
773 if (operand->location.length != sizeof(OutputType))
774 {
775 return Fail("%s: incorrect operand location length: %i (should be %i)",
776 __func__, operand->location.length, sizeof(OutputType));
777 }
778
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100779 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100780 if (!valueAddress)
781 {
782 return Fail("%s: failed to get address for operand", __func__);
783 }
784
785 outValue = *(static_cast<const OutputType*>(valueAddress));
786 return true;
787}
788
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100789template<typename HalPolicy,
790 typename HalOperation = typename HalPolicy::Operation,
791 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100792bool GetInputInt32(const HalOperation& operation,
793 uint32_t inputIndex,
794 int32_t& outValue,
795 const HalModel& model,
796 const ConversionData& data)
797{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100798 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100799}
800
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100801template<typename HalPolicy,
802 typename HalOperation = typename HalPolicy::Operation,
803 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100804bool GetInputFloat32(const HalOperation& operation,
805 uint32_t inputIndex,
806 float& outValue,
807 const HalModel& model,
808 const ConversionData& data)
809{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100810 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100811}
812
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100813template<typename HalPolicy,
814 typename HalOperation = typename HalPolicy::Operation,
815 typename HalOperandType = typename HalPolicy::OperandType,
816 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100817bool GetInputActivationFunctionImpl(const HalOperation& operation,
818 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100819 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100820 ActivationFn& outActivationFunction,
821 const HalModel& model,
822 const ConversionData& data)
823{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100824 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100825 {
826 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
827 __func__,
828 toString(type).c_str(),
829 toString(OperandType::INT32).c_str(),
830 toString(OperandType::TENSOR_INT32).c_str());
831 }
832
833 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100834 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100835 {
836 return Fail("%s: failed to get activation input value", __func__);
837 }
838 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
839 return true;
840}
841
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100842template<typename HalPolicy,
843 typename HalOperation = typename HalPolicy::Operation,
844 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100845bool GetInputActivationFunction(const HalOperation& operation,
846 uint32_t inputIndex,
847 ActivationFn& outActivationFunction,
848 const HalModel& model,
849 const ConversionData& data)
850{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100851 return GetInputActivationFunctionImpl<HalPolicy>(operation,
852 inputIndex,
853 HalPolicy::OperandType::INT32,
854 outActivationFunction,
855 model,
856 data);
arovir01b0717b52018-09-05 17:03:25 +0100857}
858
/// Reads a fused-activation operand that is supplied as a tensor rather than
/// a plain scalar.
// NOTE(review): despite the name and the comment below, this forwards
// OperandType::INT32 (not TENSOR_INT32) to the Impl — presumably the callers'
// operands are typed INT32 even when shaped as a 1-D tensor of size 1; confirm
// against the HAL operation specs before changing.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
876
877
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100878template<typename HalPolicy,
879 typename HalOperation = typename HalPolicy::Operation,
880 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100881bool GetOptionalInputActivation(const HalOperation& operation,
882 uint32_t inputIndex,
883 ActivationFn& activationFunction,
884 const HalModel& model,
885 const ConversionData& data)
886{
887 if (operation.inputs.size() <= inputIndex)
888 {
889 activationFunction = ActivationFn::kActivationNone;
890 }
891 else
892 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100893 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100894 {
895 return Fail("%s: Operation has invalid inputs", __func__);
896 }
897 }
898 return true;
899}
900
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100901template<typename HalPolicy,
902 typename ConvolutionDescriptor,
903 typename HalOperation = typename HalPolicy::Operation,
904 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100905bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
906 uint32_t dilationXIndex,
907 ConvolutionDescriptor& descriptor,
908 const HalModel& model,
909 const ConversionData& data)
910{
911 bool success = true;
912 if (operation.inputs.size() >= dilationXIndex + 2)
913 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100914 success &= GetInputScalar<HalPolicy>(operation,
915 dilationXIndex,
916 HalPolicy::OperandType::INT32,
917 descriptor.m_DilationX,
918 model,
919 data);
920 success &= GetInputScalar<HalPolicy>(operation,
921 dilationXIndex + 1,
922 HalPolicy::OperandType::INT32,
923 descriptor.m_DilationY,
924 model,
925 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100926 }
927
928 return success;
929}
930
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100931template<typename HalPolicy,
932 typename HalOperand = typename HalPolicy::Operand,
933 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100934bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100935 std::vector<int32_t>& outValues,
936 const HalModel& model,
937 const ConversionData& data)
938{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100939 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100940 {
941 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
942 }
943
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100944 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100945 if (!startAddress)
946 {
947 return Fail("%s: failed to get operand address", __func__, operand.type);
948 }
949
950 // Check number of bytes is sensible
951 const uint32_t numBytes = operand.location.length;
952 if (numBytes % sizeof(int32_t) != 0)
953 {
954 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
955 __func__, numBytes, sizeof(int32_t));
956 }
957
958 outValues.resize(numBytes / sizeof(int32_t));
959 memcpy(outValues.data(), startAddress, numBytes);
960 return true;
961}
962
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100963template<typename HalPolicy,
964 typename HalOperation = typename HalPolicy::Operation,
965 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100966bool GetInputPaddingScheme(const HalOperation& operation,
967 uint32_t inputIndex,
968 PaddingScheme& outPaddingScheme,
969 const HalModel& model,
970 const ConversionData& data)
971{
972 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100973 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100974 {
975 return Fail("%s: failed to get padding scheme input value", __func__);
976 }
977
978 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
979 return true;
980}
981
/// Resolves input operand @p inputIndex of @p operation to a LayerInputHandle,
/// i.e. an ArmNN output slot that can feed the layer being built.
///
/// Depending on the operand's lifetime:
///  - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT: reuses the output slot
///    previously recorded in data.m_OutputSlotForOperand.
///  - CONSTANT_COPY / CONSTANT_REFERENCE: materialises the value as an ArmNN
///    Constant layer and returns its output slot.
/// On any failure an invalid (default-constructed) LayerInputHandle is returned
/// after logging via Fail.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Verify at least one backend accepts a Constant layer with this tensor info.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1094
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001095template<typename HalPolicy,
1096 typename HalOperation = typename HalPolicy::Operation,
1097 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001098bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1099 uint32_t operationOutputIndex,
1100 armnn::IConnectableLayer& layer,
1101 uint32_t layerOutputIndex,
1102 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001103 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001104{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001105 using HalOperand = typename HalPolicy::Operand;
1106
1107 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001108 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1109 {
1110 return false;
1111 }
1112
1113 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1114
1115 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1116 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1117
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001118 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001119
1120 return true;
1121}
1122
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001123template<typename HalPolicy,
1124 typename HalOperation = typename HalPolicy::Operation,
1125 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001126armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1127 uint32_t inputIndex,
1128 const HalModel& model,
1129 ConversionData& data)
1130{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001131 using HalOperand = typename HalPolicy::Operand;
1132
1133 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001134 if (!operand)
1135 {
1136 return armnn::DataLayout::NHWC;
1137 }
1138
1139 if (!IsBool(*operand))
1140 {
1141 return armnn::DataLayout::NHWC;
1142 }
1143
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001144 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001145 if (!valueAddress)
1146 {
1147 return armnn::DataLayout::NHWC;
1148 }
1149
1150 if (*(static_cast<const bool*>(valueAddress)))
1151 {
1152 return armnn::DataLayout::NCHW;
1153 }
1154 else
1155 {
1156 return armnn::DataLayout::NHWC;
1157 }
1158}
1159
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001160template<typename HalPolicy,
1161 typename HalOperation = typename HalPolicy::Operation,
1162 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001163bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1164 uint32_t outputIndex,
1165 armnn::IConnectableLayer& layer,
1166 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001167 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001168{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001169 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1170 outputIndex,
1171 layer,
1172 outputIndex,
1173 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001174 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001175}
1176
/// Common conversion routine for unary activation operations (RELU, TANH,
/// etc.): validates input 0 and output 0, checks backend support, adds an
/// ArmNN Activation layer described by @p activationDesc, connects it and
/// tracks its output slot.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Confirm that at least one of the requested backends supports this
    // activation before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1225
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001226template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001227 typename HalOperation = typename HalPolicy::Operation,
1228 typename HalModel = typename HalPolicy::Model>
1229bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1230{
1231 armnn::ActivationDescriptor desc;
1232 desc.m_Function = armnn::ActivationFunction::ReLu;
1233
1234 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1235}
1236
1237template<typename HalPolicy,
1238 typename HalOperation = typename HalPolicy::Operation,
1239 typename HalModel = typename HalPolicy::Model>
1240bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1241{
1242 armnn::ActivationDescriptor desc;
1243 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1244 desc.m_A = 1.0f;
1245 desc.m_B = -1.0f;
1246
1247 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1248}
1249
1250template<typename HalPolicy,
1251 typename HalOperation = typename HalPolicy::Operation,
1252 typename HalModel = typename HalPolicy::Model>
1253bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1254{
1255 armnn::ActivationDescriptor desc;
1256 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1257 desc.m_A = 6.0f;
1258
1259 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1260}
1261
1262template<typename HalPolicy,
1263 typename HalOperation = typename HalPolicy::Operation,
1264 typename HalModel = typename HalPolicy::Model>
1265bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1266{
1267 armnn::ActivationDescriptor desc;
1268 desc.m_Function = armnn::ActivationFunction::TanH;
1269 desc.m_A = 1.0f; // android nn does not support tanH parameters
1270 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1271
1272 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1273}
1274
1275template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001276 typename HalOperation = typename HalPolicy::Operation,
1277 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001278bool ConvertPaddings(const HalOperation& operation,
1279 const HalModel& model,
1280 ConversionData& data,
1281 unsigned int rank,
1282 armnn::PadDescriptor& padDescriptor)
1283{
1284 using HalOperand = typename HalPolicy::Operand;
1285
1286 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1287 if (!paddingsOperand)
1288 {
1289 return Fail("%s: Could not read paddings operand", __func__);
1290 }
1291
1292 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1293 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1294 {
1295 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1296 }
1297
1298 std::vector<int32_t> paddings;
1299 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1300
1301 // add padding for each dimension of input tensor.
1302 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1303 {
1304 int paddingBeforeInput = paddings[i];
1305 int paddingAfterInput = paddings[i + 1];
1306
1307 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1308 {
1309 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1310 }
1311
1312 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1313 }
1314
1315 return true;
1316}
1317
/// Common conversion routine for 2-D pooling operations (MAX/AVERAGE/L2).
///
/// Supports the two NNAPI parameter layouts:
///  - explicit padding: 10+ inputs (pad l/r/t/b, strides, pool size,
///    activation, optional data layout at index 10), or
///  - implicit padding: padding scheme, strides, pool size, activation and
///    optional data layout at index 7; padding is derived via CalcPadding.
/// Adds a Pooling2d layer followed by any fused activation and tracks the
/// final output slot.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // NHWC default; may be overridden below by the optional data-layout operand.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 operands may carry an explicit data layout at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NOTE(review): shape indices assume the NHWC layout set above
        // ([1] = height, [2] = width) — confirm if an NCHW operand can
        // reach this implicit-padding branch.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    // Confirm backend support before adding any layers to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1435
/// Converts an ADD operation into an ArmNN Addition layer (plus any fused
/// activation layer), with broadcasting support for the two inputs.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Confirm backend support before adding any layers to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // BroadcastTensor connects both inputs (inserting reshapes as needed)
        // to the addition layer before its output is tracked.
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1500
// Converts an Android NN CONCATENATION operation into an ArmNN concat layer.
// Inputs 0..N-1 are the tensors to join; input N is the concat axis (INT32).
// Low-rank (1-D/2-D) inputs are padded to 3-D with reshape layers, and a
// permute pair is inserted when the backend requires the concat axis moved;
// both transformations are undone on the output side before tracking it.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis scalar lives at index numInputTensors (the last input).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // outputInfo/outputShape are mutated below as reshapes/permutes are added.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalisation the axis must index a real output dimension.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was padded to 3-D so the output can be
    // adjusted symmetrically after the concat.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                // 2-D [A, B] becomes [1, A, B]; one leading dim added.
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                // 1-D [A] becomes [1, 1, A]; two leading dims added.
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    // permutationPair.first swizzles the inputs; .second deswizzles the output.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather pointers to the (possibly swizzled) input infos for the backend query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        // The trailing reshape becomes the output layer to track.
        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1735
// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer
// with a fused activation. Supports both the explicit-padding form
// (10 inputs) and the implicit-padding form (7 inputs, padding scheme).
// Weights and bias must be constant; dynamic output shapes are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input * weights scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 10-input form: explicit left/right/top/bottom padding + strides + activation.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 7-input form: a padding scheme + strides + activation; padding is computed.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel size read from weights dims 1/2, input spatial size from the
        // NHWC input dims 1/2 (dim 1 = height/Y, dim 2 = width/X).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (endLayer == startLayer for kActivationNone
    // style cases is handled inside ProcessActivation).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1855
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer with a fused activation. Supports the
// explicit-padding form (11 inputs) and the implicit-padding form (8 inputs).
// The NNAPI [1, H, W, I*M] weights are reinterpreted/swizzled into ArmNN's
// expected [M, I, H, W] layout. Weights and bias must be constant.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I = input channels (from NHWC input dim 3); M = depth multiplier,
    // recovered as (I * M) / I from the last weights dimension.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input * weights scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // 11-input form: explicit padding + strides + depth multiplier + activation
    // (the depth multiplier at index 9 is already folded into the weights shape).
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8-input form: padding scheme + strides + activation; padding is computed.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [M, I, H, W] after the swizzle, so kernel size is
        // read from dims 2/3; input spatial size from NHWC input dims 1/2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2000
Mike Kelly3c673942019-07-25 09:26:06 +01002001template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002002 typename Operation = typename HalPolicy::Operation,
2003 typename Model = typename HalPolicy::Model>
2004bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002005{
Mike Kelly46272802019-08-14 17:00:48 +01002006 using Operand = typename HalPolicy::Operand;
2007
2008 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2009 if (!input.IsValid())
2010 {
2011 return Fail("%s: Operation has invalid input", __func__);
2012 }
2013
2014 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2015 if (!outputOperand)
2016 {
2017 return Fail("%s: Operation has invalid outputs", __func__);
2018 }
2019
2020 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2021 if (IsDynamicTensor(outputInfo))
2022 {
2023 return Fail("%s: Dynamic output tensors are not supported", __func__);
2024 }
2025
2026 bool isSupported = false;
2027 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2028 IsDequantizeSupported,
2029 data.m_Backends,
2030 isSupported,
2031 input.GetTensorInfo(),
2032 GetTensorInfoForOperand(*outputOperand));
2033 if (!isSupported)
2034 {
2035 return false;
2036 }
2037
2038 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2039 assert(layer != nullptr);
2040 input.Connect(layer->GetInputSlot(0));
2041
2042 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2043}
2044
2045template<typename HalPolicy,
2046 typename Operation = typename HalPolicy::Operation,
2047 typename Model = typename HalPolicy::Model>
2048bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2049{
2050 using Operand = typename HalPolicy::Operand;
2051
2052 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2053 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2054
2055 if (!input0.IsValid() || !input1.IsValid())
2056 {
2057 return Fail("%s: Operation has invalid inputs", __func__);
2058 }
2059
2060 // The FuseActivation parameter is always the input index 2
2061 // and it should be optional
2062 ActivationFn activationFunction;
2063 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2064 {
2065 return Fail("%s: Operation has invalid inputs", __func__);
2066 }
2067
2068 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2069 if (!output)
2070 {
2071 return Fail("%s: Could not read output 0", __func__);
2072 }
2073
2074 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2075 if (IsDynamicTensor(outputInfo))
2076 {
2077 return Fail("%s: Dynamic output tensors are not supported", __func__);
2078 }
2079
2080 bool isSupported = false;
2081 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2082 IsDivisionSupported,
2083 data.m_Backends,
2084 isSupported,
2085 input0.GetTensorInfo(),
2086 input1.GetTensorInfo(),
2087 outputInfo);
2088 if (!isSupported)
2089 {
2090 return false;
2091 }
2092
2093 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2094 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2095
2096 if (endLayer)
2097 {
2098 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
2099 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2100 }
2101 return Fail("%s: ProcessActivation failed", __func__);
2102}
2103
2104template<typename HalPolicy,
2105 typename Operation = typename HalPolicy::Operation,
2106 typename Model = typename HalPolicy::Model>
2107bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2108{
2109 using Operand = typename HalPolicy::Operand;
2110
2111 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2112 if (!input.IsValid())
2113 {
2114 return Fail("%s: Operation has invalid inputs", __func__);
2115 }
2116
2117 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2118 if (!outputOperand)
2119 {
2120 return Fail("%s: Operation has invalid outputs", __func__);
2121 }
2122
2123 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2124 if (IsDynamicTensor(outputInfo))
2125 {
2126 return Fail("%s: Dynamic output tensors are not supported", __func__);
2127 }
2128
2129 bool isSupported = false;
2130 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2131 IsFloorSupported,
2132 data.m_Backends,
2133 isSupported,
2134 input.GetTensorInfo(),
2135 outputInfo);
2136 if (!isSupported)
2137 {
2138 return false;
2139 }
2140
2141 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2142 assert(layer != nullptr);
2143 input.Connect(layer->GetInputSlot(0));
2144
2145 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2146}
2147
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN fully
// connected layer with a fused activation. Inputs of rank > 2 are flattened
// to 2-D via an inserted reshape layer before being connected. Weights (2-D)
// and bias (1-D) must be constant; dynamic output shapes are rejected.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2-D shape the input must be flattened to; throws if the
    // input cannot be flattened consistently with the weights shape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    // Input 3 is the fused activation function.
    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // Inputs of rank > 2 go through an intermediate reshape layer that
        // flattens them to the 2-D shape computed above.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2254
2255template<typename HalPolicy,
2256 typename Operation = typename HalPolicy::Operation,
2257 typename Model = typename HalPolicy::Model>
2258bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2259{
2260 using Operand = typename HalPolicy::Operand;
2261
2262 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2263 if (!input.IsValid())
2264 {
2265 return Fail("%s: Operation has invalid inputs", __func__);
2266 }
2267
2268 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2269 if (!output)
2270 {
2271 return Fail("%s: Could not read output 0", __func__);
2272 }
2273
2274 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2275 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2276
2277 if (IsDynamicTensor(outputInfo))
2278 {
2279 return Fail("%s: Dynamic output tensors are not supported", __func__);
2280 }
2281 if (outputInfo.GetNumDimensions() != 4u)
2282 {
2283 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2284 }
2285
2286 armnn::L2NormalizationDescriptor desc;
2287 desc.m_DataLayout = armnn::DataLayout::NHWC;
2288
2289 bool isSupported = false;
2290 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2291 IsL2NormalizationSupported,
2292 data.m_Backends,
2293 isSupported,
2294 inputInfo,
2295 outputInfo,
2296 desc);
2297 if (!isSupported)
2298 {
2299 return false;
2300 }
2301
2302 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2303 assert(layer != nullptr);
2304 input.Connect(layer->GetInputSlot(0));
2305
2306 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2307}
2308
2309template<typename HalPolicy,
2310 typename Operation = typename HalPolicy::Operation,
2311 typename Model = typename HalPolicy::Model>
2312bool ConvertLocalResponseNormalization(const Operation& operation,
2313 const Model& model,
2314 ConversionData& data)
2315{
2316 using Operand = typename HalPolicy::Operand;
2317 using OperandType = typename HalPolicy::OperandType;
2318
2319 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2320 if (!input.IsValid())
2321 {
2322 return Fail("%s: Operation has invalid inputs", __func__);
2323 }
2324
2325 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2326 if (!output)
2327 {
2328 return Fail("%s: Could not read output 0", __func__);
2329 }
2330
2331 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2332 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2333
2334 if (IsDynamicTensor(outputInfo))
2335 {
2336 return Fail("%s: Dynamic output tensors are not supported", __func__);
2337 }
2338 if (outputInfo.GetNumDimensions() != 4u)
2339 {
2340 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2341 }
2342
2343 armnn::NormalizationDescriptor descriptor;
2344 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2345 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2346 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2347
2348 if (!input.IsValid() ||
2349 !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2350 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2351 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2352 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2353 {
2354 return Fail("%s: Operation has invalid inputs", __func__);
2355 }
2356
2357 // ArmNN expects normSize to be the full size of the normalization
2358 // window rather than the radius as in AndroidNN.
2359 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2360
2361 bool isSupported = false;
2362 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2363 IsNormalizationSupported,
2364 data.m_Backends,
2365 isSupported,
2366 inputInfo,
2367 outputInfo,
2368 descriptor);
2369 if (!isSupported)
2370 {
2371 return false;
2372 }
2373
2374
2375 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2376 assert(layer != nullptr);
2377 input.Connect(layer->GetInputSlot(0));
2378
2379 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2380}
2381
2382template<typename HalPolicy,
2383 typename Operation = typename HalPolicy::Operation,
2384 typename Model = typename HalPolicy::Model>
2385bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2386{
2387 using Operand = typename HalPolicy::Operand;
2388
2389 armnn::ActivationDescriptor desc;
2390 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2391
2392 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2393}
2394
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model     = typename HalPolicy::Model>
// Converts an ANEURALNETWORKS_MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor to reduce, 1 = int32 axis tensor, 2 = keepDims scalar.
// Returns false (via Fail) on invalid operands or backend rejection.
bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    // Output shape must be fully known up front; dynamic shapes are rejected.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // The (i + rank) % rank expression relies on unsigned wrap-around to map
    // negative axis values (e.g. -1) into [0, rank) — assumes each axis value
    // is in [-rank, rank) as the NNAPI spec requires; TODO confirm for callers.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    // Ask the selected backends whether they support this Mean configuration.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // SetupAndTrackLayerOutputSlot registers the layer's output for operand 0.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2471
2472template<typename HalPolicy,
2473 typename Operation = typename HalPolicy::Operation,
2474 typename Model = typename HalPolicy::Model>
2475bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2476{
2477 using Operand = typename HalPolicy::Operand;
2478
2479 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2480 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2481
2482 if (!input0.IsValid() || !input1.IsValid())
2483 {
2484 return Fail("%s: Operation has invalid inputs", __func__);
2485 }
2486
2487 // The FuseActivation parameter is always the input index 2
2488 // and it should be optional
2489 ActivationFn activationFunction;
2490 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2491 {
2492 return Fail("%s: Operation has invalid inputs", __func__);
2493 }
2494
2495 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2496
2497 if (outputOperand == nullptr)
2498 {
2499 return false;
2500 }
2501
2502 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2503 if (IsDynamicTensor(outputInfo))
2504 {
2505 return Fail("%s: Dynamic output tensors are not supported", __func__);
2506 }
2507
2508 bool isSupported = false;
2509 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2510 IsMultiplicationSupported,
2511 data.m_Backends,
2512 isSupported,
2513 input0.GetTensorInfo(),
2514 input1.GetTensorInfo(),
2515 outputInfo);
2516 if (!isSupported)
2517 {
2518 return false;
2519 }
2520
2521 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2522 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2523
2524 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2525 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2526
2527 if (endLayer != nullptr)
2528 {
2529 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
2530 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2531 }
2532 else
2533 {
2534 return Fail("%s: ProcessActivation failed", __func__);
2535 }
2536}
2537
2538template<typename HalPolicy,
2539 typename Operation = typename HalPolicy::Operation,
2540 typename Model = typename HalPolicy::Model>
2541bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
2542{
2543 using Operand = typename HalPolicy::Operand;
2544
Mike Kelly3c673942019-07-25 09:26:06 +01002545 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2546 if (!input.IsValid())
2547 {
2548 return Fail("%s: Operation has invalid inputs", __func__);
2549 }
2550
2551 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2552 unsigned int rank = inputInfo.GetNumDimensions();
2553
2554 armnn::PadDescriptor descriptor;
2555 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2556 {
2557 return Fail("%s: Could not convert paddings", __func__);
2558 }
2559
2560 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2561 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2562 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2563 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2564 {
2565 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2566 }
2567
Mike Kelly46272802019-08-14 17:00:48 +01002568 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002569 if (!output)
2570 {
2571 return Fail("%s: Could not read output", __func__);
2572 }
2573
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002574 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002575 if (IsDynamicTensor(outputInfo))
2576 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002577 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002578 }
2579
2580 bool isSupported = false;
2581 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2582 IsPadSupported,
2583 data.m_Backends,
2584 isSupported,
2585 inputInfo,
2586 outputInfo,
2587 descriptor);
2588 if (!isSupported)
2589 {
2590 return false;
2591 }
2592
2593 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2594 assert(layer != nullptr);
2595 input.Connect(layer->GetInputSlot(0));
2596 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2597
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002598 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002599}
2600
Mike Kelly0a879362019-07-29 16:56:31 +01002601template<typename HalPolicy,
2602 typename Operation = typename HalPolicy::Operation,
Mike Kelly46272802019-08-14 17:00:48 +01002603 typename Model = typename HalPolicy::Model>
2604bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
2605{
2606 using Operand = typename HalPolicy::Operand;
2607
2608 const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
2609 const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2610 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2611
2612 if (inputOperand == nullptr
2613 || requestedShapeOperand == nullptr
2614 || outputOperand == nullptr)
2615 {
2616 return Fail("%s: Operation has invalid inputs", __func__);
2617 }
2618
2619 if (requestedShapeOperand->dimensions.size() != 1)
2620 {
2621 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
2622 __func__, requestedShapeOperand->dimensions.size());
2623 }
2624
2625 std::vector<int32_t> targetDimensions;
2626 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
2627 {
2628 return Fail("%s: Could not read values of input 1", __func__);
2629 }
2630
2631 const Shape inputOperandShape = GetOperandShape(*inputOperand);
2632
2633 Shape requestedShape;
2634 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
2635 // function that resolves these values into a fully specified tensor shape.
2636 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
2637 {
2638 return Fail("%s: Failed to resolve the requested shape", __func__);
2639 }
2640
2641 const Shape outputOperandShape = GetOperandShape(*outputOperand);
2642 if (!SameShape(requestedShape, outputOperandShape))
2643 {
2644 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
2645 }
2646
2647 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2648 if (!input.IsValid())
2649 {
2650 return Fail("%s: Could not read input 0", __func__);
2651 }
2652
2653 armnn::ReshapeDescriptor reshapeDescriptor;
2654 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
2655 requestedShape.dimensions.data());
2656
2657 bool isSupported = false;
2658 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2659 IsReshapeSupported,
2660 data.m_Backends,
2661 isSupported,
2662 input.GetTensorInfo(),
2663 reshapeDescriptor);
2664 if (!isSupported)
2665 {
2666 return false;
2667 }
2668
2669 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2670 assert(layer != nullptr);
2671 input.Connect(layer->GetInputSlot(0));
2672
2673 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2674}
2675
2676template<typename HalPolicy,
2677 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002678 typename Model = typename HalPolicy::Model>
2679bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2680{
Mike Kelly46272802019-08-14 17:00:48 +01002681 using Operand = typename HalPolicy::Operand;
2682
Mike Kelly0a879362019-07-29 16:56:31 +01002683 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2684 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2685
2686 if (!input0.IsValid() || !input1.IsValid())
2687 {
2688 return Fail("%s: Operation has invalid inputs", __func__);
2689 }
2690
2691 // The FuseActivation parameter is always the input index 2
2692 // and it should be optional
2693 ActivationFn activationFunction;
2694 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2695 {
2696 return Fail("%s: Operation has invalid inputs", __func__);
2697 }
2698
2699 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2700 if (!output)
2701 {
2702 return Fail("%s: Could not read output 0", __func__);
2703 }
2704
2705 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2706 if (IsDynamicTensor(outputInfo))
2707 {
2708 return Fail("%s: Dynamic output tensors are not supported", __func__);
2709 }
2710
2711 bool isSupported = false;
2712 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2713 IsSubtractionSupported,
2714 data.m_Backends,
2715 isSupported,
2716 input0.GetTensorInfo(),
2717 input1.GetTensorInfo(),
2718 outputInfo);
2719 if (!isSupported)
2720 {
2721 return false;
2722 }
2723
2724 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2725 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2726
2727 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2728 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2729
2730 if (endLayer)
2731 {
2732 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
2733 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2734 }
2735
2736 return Fail("%s: ProcessActivation failed", __func__);
2737}
2738
Finn Williams23b87b32019-07-30 11:44:05 +01002739template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002740 typename Operation = typename HalPolicy::Operation,
2741 typename Model = typename HalPolicy::Model>
2742bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2743{
2744 using Operand = typename HalPolicy::Operand;
2745
2746 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2747 if (!input.IsValid())
2748 {
2749 return Fail("%s: Operation has invalid inputs", __func__);
2750 }
2751
2752 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2753 unsigned int rank = inputInfo.GetNumDimensions();
2754 if (rank > 4)
2755 {
2756 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2757 }
2758
2759 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2760 if (!output)
2761 {
2762 return Fail("%s: Could not read output 0", __func__);
2763 }
2764
2765 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
2766 {
2767 return Fail("%s: Dynamic output tensors are not supported", __func__);
2768 }
2769
2770 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
2771 // if the operand index is out of bounds.
2772 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2773
2774 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2775
2776 std::vector<int32_t> axis;
2777 if (!axisOperand)
2778 {
2779 axis.assign(dimensionSequence,
2780 dimensionSequence + rank);
2781 }
2782 else
2783 {
2784 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
2785 }
2786
2787 std::vector<uint32_t> outputDims;
2788 for (unsigned int i = 0; i < rank; i++)
2789 {
2790 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
2791 auto currentDimension = inputInfo.GetShape()[i];
2792 if (skipSqueeze || currentDimension != 1)
2793 {
2794 outputDims.push_back(currentDimension);
2795 }
2796 }
2797
2798 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
2799
2800 armnn::TensorInfo outputInfo = inputInfo;
2801 outputInfo.SetShape(outShape);
2802
2803 armnn::ReshapeDescriptor reshapeDesc;
2804 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2805
2806 bool isSupported = false;
2807 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2808 IsReshapeSupported,
2809 data.m_Backends,
2810 isSupported,
2811 inputInfo,
2812 reshapeDesc);
2813 if (!isSupported)
2814 {
2815 return false;
2816 }
2817
2818 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
2819 assert(layer != nullptr);
2820 input.Connect(layer->GetInputSlot(0));
2821
2822 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2823}
2824
2825template<typename HalPolicy,
2826 typename Operation = typename HalPolicy::Operation,
2827 typename Model = typename HalPolicy::Model>
2828bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2829{
2830 using Operand = typename HalPolicy::Operand;
2831
2832 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2833 if (!input.IsValid())
2834 {
2835 return Fail("%s: Operation has invalid inputs", __func__);
2836 }
2837
2838 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2839 unsigned int rank = inputInfo.GetNumDimensions();
2840 if (rank > 4)
2841 {
2842 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2843 }
2844
2845 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2846 if (!output)
2847 {
2848 return Fail("%s: Could not read output 0", __func__);
2849 }
2850
2851 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2852 if (IsDynamicTensor(outputInfo))
2853 {
2854 return Fail("%s: Dynamic output tensors are not supported", __func__);
2855 }
2856
2857 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2858 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2859 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
2860
2861 std::vector<int32_t> beginValues;
2862 std::vector<int32_t> endValues;
2863 std::vector<int32_t> stridesValues;
2864
2865 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
2866 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
2867 {
2868 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
2869 {
2870 return false;
2871 }
2872
2873 if (operandValues.size() != rank)
2874 {
2875 return false;
2876 }
2877
2878 return true;
2879 };
2880
2881 if (!ValidateInputOperands(*beginOperand, beginValues)
2882 || !ValidateInputOperands(*endOperand, endValues)
2883 || !ValidateInputOperands(*stridesOperand, stridesValues))
2884 {
2885 return Fail("%s: Operation has invalid input operand", __func__);
2886 }
2887
2888 // Stride cannot have value '0'
2889 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
2890 {
2891 return Fail("%s: Stride must be non-zero value.", __func__);
2892 }
2893
2894 armnn::StridedSliceDescriptor descriptor;
2895 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
2896 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
2897 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
2898 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2899
2900 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
2901 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
2902 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
2903 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
2904 {
2905 return Fail("%s: Operation has invalid inputs", __func__);
2906 }
2907
2908 bool isSupported = false;
2909 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2910 IsStridedSliceSupported,
2911 data.m_Backends,
2912 isSupported,
2913 inputInfo,
2914 outputInfo,
2915 descriptor);
2916 if (!isSupported)
2917 {
2918 return false;
2919 }
2920
2921 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
2922 assert(layer != nullptr);
2923 input.Connect(layer->GetInputSlot(0));
2924
2925 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2926}
2927
2928template<typename HalPolicy,
2929 typename Operation = typename HalPolicy::Operation,
2930 typename Model = typename HalPolicy::Model>
2931bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
2932{
2933 using Operand = typename HalPolicy::Operand;
2934
2935 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2936 if (!input.IsValid())
2937 {
2938 return Fail("%s: Operation has invalid inputs", __func__);
2939 }
2940
2941 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2942 unsigned int rank = inputInfo.GetNumDimensions();
2943 if (rank > 4)
2944 {
2945 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2946 }
2947
2948 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
2949 // if the operand index is out of bounds.
2950 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2951
2952 std::vector<int32_t> perm(rank);
2953 if (!permOperand)
2954 {
2955 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
2956 for (unsigned int i = rank; i > 0; i--)
2957 {
2958 perm[rank - i] = boost::numeric_cast<int> (i - 1);
2959 }
2960 }
2961 else
2962 {
2963 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
2964 }
2965
2966 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
2967
2968 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
2969 if (!permutationVector.IsEqual(NHWCToArmNN)
2970 && !permutationVector.IsEqual(ArmNNToNHWC)
2971 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
2972 {
2973 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
2974 }
2975
2976 armnn::PermuteDescriptor permuteDesc;
2977 permuteDesc.m_DimMappings = permutationVector;
2978
2979 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2980 if (!output)
2981 {
2982 return Fail("%s: Could not read output 0", __func__);
2983 }
2984
2985 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2986
2987 bool isSupported = false;
2988 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2989 IsPermuteSupported,
2990 data.m_Backends,
2991 isSupported,
2992 inputInfo,
2993 outputInfo,
2994 permuteDesc);
2995 if (!isSupported)
2996 {
2997 return false;
2998 }
2999
3000 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3001 assert(layer != nullptr);
3002 input.Connect(layer->GetInputSlot(0));
3003
3004 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3005}
3006
3007template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003008 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003009 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003010 typename HalModel = typename HalPolicy::Model>
3011bool ConvertBatchToSpaceNd(const HalOperation& operation,
3012 const HalModel& model,
3013 ConversionData& data)
3014{
Finn Williams23b87b32019-07-30 11:44:05 +01003015
3016 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3017 if (!input.IsValid())
3018 {
3019 return Fail("%s: Operation has invalid inputs", __func__);
3020 }
3021
3022 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3023 if (!output)
3024 {
3025 return Fail("%s: Could not read output 0", __func__);
3026 }
3027
3028 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3029 if (IsDynamicTensor(outputInfo))
3030 {
3031 return Fail("%s: Dynamic output tensors are not supported", __func__);
3032 }
3033
3034 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3035 if (!blockOperand)
3036 {
3037 return Fail("%s: Could not read input 1", __func__);
3038 }
3039
3040 // Convert the block operand to int32
3041 std::vector<int32_t> block;
3042 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3043 {
3044 return Fail("%s: Input 1 has invalid values", __func__);
3045 }
3046
3047 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3048
3049 unsigned int rank = inputInfo.GetNumDimensions();
3050 if (rank != 4)
3051 {
3052 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3053 }
3054
3055 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3056 {
3057 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3058 " greater than or equal to 1", __func__);
3059 }
3060
3061 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3062 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3063 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3064
3065 if (Is12Operand(*output))
3066 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003067 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003068 }
3069 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3070 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3071
3072 bool isSupported = false;
3073 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3074 IsBatchToSpaceNdSupported,
3075 data.m_Backends,
3076 isSupported,
3077 inputInfo,
3078 outputInfo,
3079 batchToSpaceNdDesc);
3080 if (!isSupported)
3081 {
3082 return false;
3083 }
3084
3085 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3086 assert(layer != nullptr);
3087 input.Connect(layer->GetInputSlot(0));
3088
3089 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3090}
Mike Kelly0a879362019-07-29 16:56:31 +01003091
Finn Williamsd74c5052019-07-30 17:06:00 +01003092template<typename HalPolicy,
3093 typename HalOperation = typename HalPolicy::Operation,
3094 typename HalOperand = typename HalPolicy::Operand,
3095 typename HalModel = typename HalPolicy::Model>
3096bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3097{
3098 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3099 if (!input.IsValid())
3100 {
3101 return Fail("%s: Operation has invalid inputs", __func__);
3102 }
3103
3104 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3105 unsigned int rank = inputInfo.GetNumDimensions();
3106 unsigned int spatialDim = rank - 2;
3107
3108 if (rank != 4)
3109 {
3110 Fail("%s: Only inputs with rank 4 are supported", __func__);
3111 }
3112
3113 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3114 if (!output)
3115 {
3116 return Fail("%s: Could not read output 0", __func__);
3117 }
3118
3119 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3120 if (IsDynamicTensor(outputInfo))
3121 {
3122 return Fail("%s: Dynamic output tensors are not supported", __func__);
3123 }
3124
3125 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3126 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3127
3128 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3129 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3130 {
3131 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3132 }
3133
3134 std::vector<int32_t> blockShape;
3135 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3136 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3137 {
3138 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3139 }
3140
3141 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3142 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3143 {
3144 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3145 }
3146
3147 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3148 std::vector<int32_t> paddings;
3149 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3150 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3151 {
3152 int paddingBeforeInput = paddings[i];
3153 int paddingAfterInput = paddings[i + 1];
3154 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3155 {
3156 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3157 }
3158
3159 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3160 }
3161
3162 armnn::SpaceToBatchNdDescriptor descriptor;
3163 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3164 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3165 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3166
3167 if (Is12Operand(*output))
3168 {
3169 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3170 }
3171
3172 bool isSupported = false;
3173 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3174 IsSpaceToBatchNdSupported,
3175 data.m_Backends,
3176 isSupported,
3177 inputInfo,
3178 outputInfo,
3179 descriptor);
3180 if (!isSupported)
3181 {
3182 return false;
3183 }
3184
3185 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3186 assert(layer != nullptr);
3187 input.Connect(layer->GetInputSlot(0));
3188
3189 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3190}
3191
saoste01b8471482018-10-10 09:44:51 +01003192} // namespace armnn_driver