blob: de255803e8031a1553fa7a274da4c9dd9f2e9dcd [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Shared state threaded through the conversion of a single model: the target
// backends to try, the ArmNN network being built, the output slot created for
// each operand, and the memory pools backing constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    // Indexed by operand index; populated as operations are converted.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps a (possibly invalid) output slot of a previously converted layer
// together with its tensor info, for use as the input to a subsequent layer.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when the handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds an armnn::ConstTensor for a constant model operand, together with any
// swizzled copy of the operand data that had to be made. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (would alias m_SwizzledTensorData); movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Ts>
static bool Fail(const char* formatStr, Ts&&... args)
{
    ALOGD(formatStr, std::forward<Ts>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Tries each backend in order and sets 'supported' to true for the first one
// whose Is*Supported check passes; every rejection (and any unregistered
// backend) is logged via ALOGD. An InvalidArgumentException thrown by a
// backend check is rethrown with source-location information attached.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Mike Kellyb5fdf382019-06-11 16:35:25 +0100169template<typename Operand>
170armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true if the given HAL 1.2 operand type is one that the driver can
// convert (tensor types plus BOOL).
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
195
// V1_0 has no BOOL operand type, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
200
// Checks if an operand is a 1.2 Operand; a V1_0 operand never is.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
205
#ifdef ARMNN_ANDROID_NN_V1_2

// True when the V1_2 operand holds a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
220
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100221template<typename LayerHandleType>
222armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
223 armnn::TensorInfo reshapeInfo)
224{
225 armnn::ReshapeDescriptor reshapeDescriptor;
226 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
227
228 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
229 BOOST_ASSERT(reshapeLayer != nullptr);
230
231 // Attach the input layer to the reshape layer
232 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
233 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
234
235 return *reshapeLayer;
236}
237
// Connects the two inputs to 'startLayer', inserting a Reshape in front of the
// lower-rank input when the ranks differ, so that elementwise broadcasting can
// take place. Returns false if the required Reshape is not supported by any of
// the backends in 'data'.
bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller shape into the trailing dimensions; the leading ones stay 1
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
327
328void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
329 android::nn::PaddingScheme scheme)
330{
331 int32_t padHead;
332 int32_t padTail;
333 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
334 outPadHead = boost::numeric_cast<uint32_t>(padHead);
335 outPadTail = boost::numeric_cast<uint32_t>(padTail);
336}
337
#ifdef ARMNN_ANDROID_NN_V1_2

// Converts an implicit padding scheme into explicit head/tail padding for one
// spatial dimension of a dilated kernel.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head = 0;
    int32_t tail = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

// Computes explicit (possibly negative) padding for a transposed convolution.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
357
Matthew Bentham912b3622019-05-03 15:49:14 +0100358Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100359{
360 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100361 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100362 shape.dimensions = operand.dimensions;
363 shape.scale = operand.scale;
364 shape.offset = operand.zeroPoint;
365 return shape;
366}
367
#ifdef ARMNN_ANDROID_NN_V1_2

// Builds an android::nn::Shape describing the given V1_2 operand.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale = operand.scale;
    result.offset = operand.zeroPoint;
    return result;
}

#endif
381
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Only adjust the scale when it is within 1% of the expected value;
        // larger mismatches are left for ArmNN to reject later.
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
400
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used for concat axis remapping)
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
411
412template<typename OSlot>
413armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
414 const armnn::PermutationVector& mappings)
415{
416 // Add swizzle layer
417 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
418
419 BOOST_ASSERT(layer != nullptr);
420
421 // Connect input to swizzle layer
422 input.Connect(layer->GetInputSlot(0));
423
424 // Setup swizzled output
425 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
426 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
427
428 return *layer;
429}
430
431void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
432{
433 // Add swizzle layer
434 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
435 // Connect swizzled input to layer
436 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
437}
438
439armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
440{
441 // Add deswizzle layer
442 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
443 return deswizzleLayer;
444}
445
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Swizzles the input into 'firstLayer' and returns the deswizzle permute layer
// appended to 'lastLayer'.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
455
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Single-layer overload: swizzle in and deswizzle out around the same layer.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
462
463bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
464 const armnn::TensorShape & outputShape,
465 uint32_t concatDim)
466{
467 // Validate the output shape is correct given the input shapes (which have just been validated)
468 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
469 if (outputShape.GetNumDimensions() != numDimensions)
470 {
471 return Fail("%s: Output shape has wrong number of dimensions", __func__);
472 }
473
474 unsigned int outputSizeAlongConcatenatedDimension = 0;
475 for (unsigned int i = 0; i < inputShapes.size(); i++)
476 {
477 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
478 }
479
480 for (unsigned int i = 0; i < numDimensions; ++i)
481 {
482 if (i == concatDim)
483 {
484 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
485 {
486 return Fail(
487 "%s: Invalid output shape for dimension %d (%d != %d)",
488 __func__,
489 i,
490 outputShape[i],
491 outputSizeAlongConcatenatedDimension);
492 }
493 }
494 else
495 {
496 if (outputShape[i] != inputShapes[0][i])
497 {
498 return Fail("%s: Invalid output shape", __func__);
499 }
500 }
501 }
502
503 return true;
504}
505
506bool RequiresReshape(armnn::TensorShape & inputShape)
507{
508 return inputShape.GetNumDimensions() < 3;
509}
510
arovir01b0717b52018-09-05 17:03:25 +0100511void SwizzleInputs(armnn::INetwork& network,
512 std::vector<LayerInputHandle>& inputs,
513 std::vector<armnn::TensorShape>& inputShapes,
514 const armnn::PermutationVector& mapping)
515{
516 if (!mapping.IsEqual(IdentityPermutation4D))
517 {
518 size_t nInputs = inputs.size();
519 for (size_t i=0; i<nInputs; ++i)
520 {
521 // add swizzle layer
522 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
523 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
524 auto& outputInfo = outputSlot.GetTensorInfo();
525 // replace inputs with the swizzled ones
526 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
527 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
528 }
529 }
530}
531
narpra01f176d5a2018-11-18 20:17:48 +0000532bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
533 int32_t & concatDimension,
534 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100535{
narpra01f176d5a2018-11-18 20:17:48 +0000536 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100537 BOOST_ASSERT(numberOfDimensions >= 3);
538
539 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000540 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
541 // or along dimension 0 or 2 for a 3-D tensor.
542 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100543 {
narpra01f176d5a2018-11-18 20:17:48 +0000544 concatDimension = 1;
545 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
546 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100547 }
narpra01f176d5a2018-11-18 20:17:48 +0000548 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100549 {
narpra01f176d5a2018-11-18 20:17:48 +0000550 concatDimension = 0;
551 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
552 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100553 }
narpra01f176d5a2018-11-18 20:17:48 +0000554 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100555}
556
557} // anonymous namespace
558
559namespace armnn_driver
560{
561
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
572
573} // namespace armnn_driver
574
575///
576/// Utility templates
577///
578
579namespace armnn_driver
580{
581
582using namespace android::nn;
583
// Retrieves the operand backing the given input index of an operation.
// Returns nullptr when the index is out of range; the failure is logged unless
// 'failOnIndexOutOfBounds' is false (useful for probing optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    const uint32_t operandIndex = operation.inputs[inputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operandIndex];
}
605
// Retrieves the operand backing the given output index of an operation, or
// nullptr (with a logged failure) when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    const uint32_t operandIndex = operation.outputs[outputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operandIndex];
}
625
// Returns a read-only pointer to a constant operand's data, resolving it either
// from the model's inline operandValues blob or from one of the shared memory
// pools. Returns nullptr when the value cannot be obtained; this is logged as
// a failure unless the operand is optional and simply has no value.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value is handled as an error below
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
672
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100673template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100674 typename HalOperation = typename HalPolicy::Operation,
675 typename HalModel = typename HalPolicy::Model,
676 typename HalOperandType = typename HalPolicy::OperandType>
677bool GetOperandType(const HalOperation& operation,
678 uint32_t inputIndex,
679 const HalModel& model,
680 HalOperandType& type)
681{
682 using HalOperand = typename HalPolicy::Operand;
683
684 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
685 if (!operand)
686 {
687 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
688 }
689
690 type = operand->type;
691 return true;
692}
693
// Wraps a constant HAL operand in a ConstTensorPin, optionally permuting the
// data and/or overriding the tensor shape. Returns an invalid pin on failure;
// an operand that is optional and absent yields an invalid pin flagged as
// optional rather than an error.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Non-optional operands must have a constant (or explicitly absent) lifetime
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
741
742template<typename HalPolicy,
743 typename HalOperation = typename HalPolicy::Operation,
744 typename HalModel = typename HalPolicy::Model>
745ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
746 uint32_t inputIndex,
747 const HalModel& model,
748 const ConversionData& data,
749 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
750 const armnn::TensorShape* overrideTensorShape = nullptr,
751 bool optional = false)
752{
753 using HalOperand = typename HalPolicy::Operand;
754
755 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
756 if (!operand)
757 {
758 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
759 return ConstTensorPin();
760 }
761 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
762 model,
763 data,
764 dimensionMappings,
765 overrideTensorShape,
766 optional);
767}
768
769template<typename HalPolicy,
770 typename OutputType,
771 typename HalOperandType = typename HalPolicy::OperandType,
772 typename HalOperation = typename HalPolicy::Operation,
773 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100774bool GetInputScalar(const HalOperation& operation,
775 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100776 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100777 OutputType& outValue,
778 const HalModel& model,
779 const ConversionData& data)
780{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100781 using HalOperand = typename HalPolicy::Operand;
782
783 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100784 if (!operand)
785 {
786 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
787 }
788
789 if (operand->type != type)
790 {
791 return Fail("%s: unexpected operand type: %s (should be %s)",
792 __func__, toString(operand->type).c_str(), toString(type).c_str());
793 }
794
795 if (operand->location.length != sizeof(OutputType))
796 {
797 return Fail("%s: incorrect operand location length: %i (should be %i)",
798 __func__, operand->location.length, sizeof(OutputType));
799 }
800
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100801 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100802 if (!valueAddress)
803 {
804 return Fail("%s: failed to get address for operand", __func__);
805 }
806
807 outValue = *(static_cast<const OutputType*>(valueAddress));
808 return true;
809}
810
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100811template<typename HalPolicy,
812 typename HalOperation = typename HalPolicy::Operation,
813 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100814bool GetInputInt32(const HalOperation& operation,
815 uint32_t inputIndex,
816 int32_t& outValue,
817 const HalModel& model,
818 const ConversionData& data)
819{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100820 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100821}
822
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100823template<typename HalPolicy,
824 typename HalOperation = typename HalPolicy::Operation,
825 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100826bool GetInputFloat32(const HalOperation& operation,
827 uint32_t inputIndex,
828 float& outValue,
829 const HalModel& model,
830 const ConversionData& data)
831{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100832 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100833}
834
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100835template<typename HalPolicy,
836 typename HalOperation = typename HalPolicy::Operation,
837 typename HalOperandType = typename HalPolicy::OperandType,
838 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100839bool GetInputActivationFunctionImpl(const HalOperation& operation,
840 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100841 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100842 ActivationFn& outActivationFunction,
843 const HalModel& model,
844 const ConversionData& data)
845{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100846 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100847 {
848 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
849 __func__,
850 toString(type).c_str(),
851 toString(OperandType::INT32).c_str(),
852 toString(OperandType::TENSOR_INT32).c_str());
853 }
854
855 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100856 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100857 {
858 return Fail("%s: failed to get activation input value", __func__);
859 }
860 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
861 return true;
862}
863
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100864template<typename HalPolicy,
865 typename HalOperation = typename HalPolicy::Operation,
866 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100867bool GetInputActivationFunction(const HalOperation& operation,
868 uint32_t inputIndex,
869 ActivationFn& outActivationFunction,
870 const HalModel& model,
871 const ConversionData& data)
872{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100873 return GetInputActivationFunctionImpl<HalPolicy>(operation,
874 inputIndex,
875 HalPolicy::OperandType::INT32,
876 outActivationFunction,
877 model,
878 data);
arovir01b0717b52018-09-05 17:03:25 +0100879}
880
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100881template<typename HalPolicy,
882 typename HalOperation = typename HalPolicy::Operation,
883 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100884bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
885 uint32_t inputIndex,
886 ActivationFn& outActivationFunction,
887 const HalModel& model,
888 const ConversionData& data)
889{
890 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100891 return GetInputActivationFunctionImpl<HalPolicy>(operation,
892 inputIndex,
893 HalPolicy::OperandType::INT32,
894 outActivationFunction,
895 model,
896 data);
arovir01b0717b52018-09-05 17:03:25 +0100897}
898
899
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100900template<typename HalPolicy,
901 typename HalOperation = typename HalPolicy::Operation,
902 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100903bool GetOptionalInputActivation(const HalOperation& operation,
904 uint32_t inputIndex,
905 ActivationFn& activationFunction,
906 const HalModel& model,
907 const ConversionData& data)
908{
909 if (operation.inputs.size() <= inputIndex)
910 {
911 activationFunction = ActivationFn::kActivationNone;
912 }
913 else
914 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100915 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100916 {
917 return Fail("%s: Operation has invalid inputs", __func__);
918 }
919 }
920 return true;
921}
922
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100923template<typename HalPolicy,
924 typename ConvolutionDescriptor,
925 typename HalOperation = typename HalPolicy::Operation,
926 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100927bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
928 uint32_t dilationXIndex,
929 ConvolutionDescriptor& descriptor,
930 const HalModel& model,
931 const ConversionData& data)
932{
933 bool success = true;
934 if (operation.inputs.size() >= dilationXIndex + 2)
935 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100936 success &= GetInputScalar<HalPolicy>(operation,
937 dilationXIndex,
938 HalPolicy::OperandType::INT32,
939 descriptor.m_DilationX,
940 model,
941 data);
942 success &= GetInputScalar<HalPolicy>(operation,
943 dilationXIndex + 1,
944 HalPolicy::OperandType::INT32,
945 descriptor.m_DilationY,
946 model,
947 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100948 }
949
950 return success;
951}
952
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100953template<typename HalPolicy,
954 typename HalOperand = typename HalPolicy::Operand,
955 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100956bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100957 std::vector<int32_t>& outValues,
958 const HalModel& model,
959 const ConversionData& data)
960{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100961 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100962 {
963 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
964 }
965
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100966 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100967 if (!startAddress)
968 {
969 return Fail("%s: failed to get operand address", __func__, operand.type);
970 }
971
972 // Check number of bytes is sensible
973 const uint32_t numBytes = operand.location.length;
974 if (numBytes % sizeof(int32_t) != 0)
975 {
976 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
977 __func__, numBytes, sizeof(int32_t));
978 }
979
980 outValues.resize(numBytes / sizeof(int32_t));
981 memcpy(outValues.data(), startAddress, numBytes);
982 return true;
983}
984
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100985template<typename HalPolicy,
986 typename HalOperation = typename HalPolicy::Operation,
987 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100988bool GetInputPaddingScheme(const HalOperation& operation,
989 uint32_t inputIndex,
990 PaddingScheme& outPaddingScheme,
991 const HalModel& model,
992 const ConversionData& data)
993{
994 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100995 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100996 {
997 return Fail("%s: failed to get padding scheme input value", __func__);
998 }
999
1000 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1001 return true;
1002}
1003
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves input operand `inputIndex` of `operation` to an ArmNN input
// handle, dispatching on the operand's lifetime:
//   - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT: bind to the output
//     slot already tracked for that operand (may be nullptr if the producing
//     layer failed to convert).
//   - CONSTANT_COPY / CONSTANT_REFERENCE: materialise the data as an ArmNN
//     Constant layer and bind to its output slot.
// Any failure returns a default-constructed (invalid) LayerInputHandle; the
// Fail() calls here only log, they do not return the bool.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand can throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must be supported by at least one backend
                    // before a Constant layer is added for it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break; // unreachable: both branches above return
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1116
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001117template<typename HalPolicy,
1118 typename HalOperation = typename HalPolicy::Operation,
1119 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001120bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1121 uint32_t operationOutputIndex,
1122 armnn::IConnectableLayer& layer,
1123 uint32_t layerOutputIndex,
1124 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001125 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001126{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001127 using HalOperand = typename HalPolicy::Operand;
1128
1129 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001130 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1131 {
1132 return false;
1133 }
1134
1135 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1136
1137 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1138 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1139
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001140 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001141
1142 return true;
1143}
1144
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001145template<typename HalPolicy,
1146 typename HalOperation = typename HalPolicy::Operation,
1147 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001148armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1149 uint32_t inputIndex,
1150 const HalModel& model,
1151 ConversionData& data)
1152{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001153 using HalOperand = typename HalPolicy::Operand;
1154
1155 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001156 if (!operand)
1157 {
1158 return armnn::DataLayout::NHWC;
1159 }
1160
1161 if (!IsBool(*operand))
1162 {
1163 return armnn::DataLayout::NHWC;
1164 }
1165
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001166 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001167 if (!valueAddress)
1168 {
1169 return armnn::DataLayout::NHWC;
1170 }
1171
1172 if (*(static_cast<const bool*>(valueAddress)))
1173 {
1174 return armnn::DataLayout::NCHW;
1175 }
1176 else
1177 {
1178 return armnn::DataLayout::NHWC;
1179 }
1180}
1181
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001182template<typename HalPolicy,
1183 typename HalOperation = typename HalPolicy::Operation,
1184 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001185bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1186 uint32_t outputIndex,
1187 armnn::IConnectableLayer& layer,
1188 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001189 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001190{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001191 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1192 outputIndex,
1193 layer,
1194 outputIndex,
1195 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001196 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001197}
1198
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Shared conversion path for all activation operations (ReLU variants,
// TanH, ...): validates input/output, queries backend support for the given
// descriptor, adds an ArmNN Activation layer and wires it up.
// `operationName` is only used in failure messages.
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the backends whether this activation is supported before adding
    // the layer to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Register the layer's output slot for operand 0 of this operation.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1247
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001248template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001249 typename HalOperation = typename HalPolicy::Operation,
1250 typename HalModel = typename HalPolicy::Model>
1251bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1252{
1253 armnn::ActivationDescriptor desc;
1254 desc.m_Function = armnn::ActivationFunction::ReLu;
1255
1256 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1257}
1258
1259template<typename HalPolicy,
1260 typename HalOperation = typename HalPolicy::Operation,
1261 typename HalModel = typename HalPolicy::Model>
1262bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1263{
1264 armnn::ActivationDescriptor desc;
1265 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1266 desc.m_A = 1.0f;
1267 desc.m_B = -1.0f;
1268
1269 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1270}
1271
1272template<typename HalPolicy,
1273 typename HalOperation = typename HalPolicy::Operation,
1274 typename HalModel = typename HalPolicy::Model>
1275bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1276{
1277 armnn::ActivationDescriptor desc;
1278 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1279 desc.m_A = 6.0f;
1280
1281 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1282}
1283
1284template<typename HalPolicy,
1285 typename HalOperation = typename HalPolicy::Operation,
1286 typename HalModel = typename HalPolicy::Model>
1287bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1288{
1289 armnn::ActivationDescriptor desc;
1290 desc.m_Function = armnn::ActivationFunction::TanH;
1291 desc.m_A = 1.0f; // android nn does not support tanH parameters
1292 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1293
1294 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1295}
1296
1297template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001298 typename HalOperation = typename HalPolicy::Operation,
1299 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001300bool ConvertPaddings(const HalOperation& operation,
1301 const HalModel& model,
1302 ConversionData& data,
1303 unsigned int rank,
1304 armnn::PadDescriptor& padDescriptor)
1305{
1306 using HalOperand = typename HalPolicy::Operand;
1307
1308 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1309 if (!paddingsOperand)
1310 {
1311 return Fail("%s: Could not read paddings operand", __func__);
1312 }
1313
1314 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1315 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1316 {
1317 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1318 }
1319
1320 std::vector<int32_t> paddings;
1321 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1322
1323 // add padding for each dimension of input tensor.
1324 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1325 {
1326 int paddingBeforeInput = paddings[i];
1327 int paddingAfterInput = paddings[i + 1];
1328
1329 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1330 {
1331 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1332 }
1333
1334 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1335 }
1336
1337 return true;
1338}
1339
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Shared conversion path for AVERAGE_POOL_2D / L2_POOL_2D / MAX_POOL_2D.
// Two HAL signatures are handled: the explicit-padding form (>= 10 inputs:
// pad l/r/t/b, strides, pool size, activation, optional layout) and the
// implicit-padding form (padding scheme, strides, pool size, activation,
// optional layout). `operationName` is only used in failure messages.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 adds an optional trailing data-layout operand.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NHWC layout: shape is [batch, height, width, channels].
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit pad values from the implicit padding scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1457
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
// Converts an ADD operation: two tensor inputs (broadcast if shapes differ),
// an optional fused activation at input 2, and one output. Adds an ArmNN
// Addition layer followed by the fused activation.
bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the backends whether this addition is supported before mutating
    // the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // BroadcastTensor connects both inputs to startLayer, inserting a
        // Reshape for the smaller tensor when the ranks differ; it can fail
        // if that reshape is unsupported.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1527
/// Converts an Android NN CONCATENATION operation into an ArmNN Concat layer.
///
/// The operation's first N inputs (0..N-1) are the tensors to join; input N is an INT32
/// scalar giving the concatenation axis (negative values are interpreted TensorFlow-style,
/// relative to the output rank). Inputs of rank 1 or 2 are first expanded to rank 3 via
/// reshape layers, and axes that ArmNN's concat cannot handle directly are dealt with by
/// permuting (swizzling) the inputs, concatenating, then permuting back.
///
/// @return true on success; false (via Fail) on invalid inputs, unsupported shapes, or
///         lack of backend support.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis scalar lives at input index numInputTensors (i.e. the last input).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // Taken by value: the shape (and later the info) is rewritten below when inputs are
    // reshaped or permuted.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether low-rank inputs were expanded so the inverse reshape can be applied
    // to the output, and by how many leading 1-dimensions.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            // NOTE(review): operandInputHandle is consumed here before the IsValid()
            // check further down — presumably safe for invalid handles, but confirm.
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is a no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1762
/// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Supports both NNAPI signatures:
///  - 10 inputs: explicit padding (left/right/top/bottom at indices 3-6), strides (7-8),
///    fused activation (9);
///  - 7 inputs: implicit padding scheme (index 3), strides (4-5), fused activation (6),
///    with the actual padding computed here via CalcPadding.
/// Weights (input 1) and bias (input 2) must be constant tensors.
///
/// @return true on success; false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale where they differ slightly.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding signature.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding signature: derive the padding from the scheme, strides,
        // kernel size (OHWI weights: [O, H, W, I]) and NHWC input size.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1882
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001883template<typename HalPolicy,
1884 typename HalOperation = typename HalPolicy::Operation,
1885 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01001886bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
1887{
1888 using HalOperand = typename HalPolicy::Operand;
1889 using HalOperandType = typename HalPolicy::OperandType;
1890
1891 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1892 if (!input.IsValid() )
1893 {
1894 return Fail("%s: Operation has invalid inputs", __func__);
1895 }
1896
1897 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1898 unsigned int rank = inputInfo.GetNumDimensions();
1899 if (rank != 4)
1900 {
1901 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1902 }
1903
1904 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1905 if (!output)
1906 {
1907 return Fail("%s: Could not read output 0", __func__);
1908 }
1909
1910 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1911 if (IsDynamicTensor(outputInfo))
1912 {
1913 return Fail("%s: Dynamic output tensors are not supported", __func__);
1914 }
1915
1916 armnn::DepthToSpaceDescriptor descriptor;
1917
1918 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
1919 if (descriptor.m_BlockSize <= 1)
1920 {
1921 return Fail("%s: Block size must be at least 1 in all dimensions");
1922 }
1923
1924 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1925 if (Is12Operand(*output))
1926 {
1927 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
1928 }
1929
1930 bool isSupported = false;
1931 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1932 IsDepthToSpaceSupported,
1933 data.m_Backends,
1934 isSupported,
1935 inputInfo,
1936 outputInfo,
1937 descriptor);
1938 if (!isSupported)
1939 {
1940 return false;
1941 }
1942
1943 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
1944 assert(layer != nullptr);
1945 input.Connect(layer->GetInputSlot(0));
1946
1947 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1948}
1949
/// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN DepthwiseConvolution2d layer.
///
/// Supports both NNAPI signatures:
///  - 11 inputs: explicit padding (indices 3-6), strides (7-8), fused activation at
///    index 10 (index 9 is not read here — presumably the depth multiplier, which is
///    implicitly encoded in the weights shape below; confirm against the NNAPI spec);
///  - 8 inputs: implicit padding scheme (3), strides (4-5), fused activation (7).
/// Weights (input 1) and bias (input 2) must be constant tensors.
///
/// @return true on success; false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the NHWC input's channel dim; M = (I*M)/I recovers the depth multiplier.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale where they differ slightly.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding signature.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding signature: derive padding from the scheme, strides,
        // the swizzled MIHW weights ([3]=W, [2]=H) and the NHWC input size.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2094
Mike Kelly3c673942019-07-25 09:26:06 +01002095template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002096 typename Operation = typename HalPolicy::Operation,
2097 typename Model = typename HalPolicy::Model>
2098bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002099{
Mike Kelly46272802019-08-14 17:00:48 +01002100 using Operand = typename HalPolicy::Operand;
2101
2102 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2103 if (!input.IsValid())
2104 {
2105 return Fail("%s: Operation has invalid input", __func__);
2106 }
2107
2108 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2109 if (!outputOperand)
2110 {
2111 return Fail("%s: Operation has invalid outputs", __func__);
2112 }
2113
2114 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2115 if (IsDynamicTensor(outputInfo))
2116 {
2117 return Fail("%s: Dynamic output tensors are not supported", __func__);
2118 }
2119
2120 bool isSupported = false;
2121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2122 IsDequantizeSupported,
2123 data.m_Backends,
2124 isSupported,
2125 input.GetTensorInfo(),
2126 GetTensorInfoForOperand(*outputOperand));
2127 if (!isSupported)
2128 {
2129 return false;
2130 }
2131
2132 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2133 assert(layer != nullptr);
2134 input.Connect(layer->GetInputSlot(0));
2135
2136 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2137}
2138
2139template<typename HalPolicy,
2140 typename Operation = typename HalPolicy::Operation,
2141 typename Model = typename HalPolicy::Model>
2142bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2143{
2144 using Operand = typename HalPolicy::Operand;
2145
2146 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2147 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2148
2149 if (!input0.IsValid() || !input1.IsValid())
2150 {
2151 return Fail("%s: Operation has invalid inputs", __func__);
2152 }
2153
2154 // The FuseActivation parameter is always the input index 2
2155 // and it should be optional
2156 ActivationFn activationFunction;
2157 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2158 {
2159 return Fail("%s: Operation has invalid inputs", __func__);
2160 }
2161
2162 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2163 if (!output)
2164 {
2165 return Fail("%s: Could not read output 0", __func__);
2166 }
2167
2168 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2169 if (IsDynamicTensor(outputInfo))
2170 {
2171 return Fail("%s: Dynamic output tensors are not supported", __func__);
2172 }
2173
2174 bool isSupported = false;
2175 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2176 IsDivisionSupported,
2177 data.m_Backends,
2178 isSupported,
2179 input0.GetTensorInfo(),
2180 input1.GetTensorInfo(),
2181 outputInfo);
2182 if (!isSupported)
2183 {
2184 return false;
2185 }
2186
2187 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2188 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2189
2190 if (endLayer)
2191 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002192 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2193 if (!isReshapeSupported)
2194 {
2195 return false;
2196 }
2197
Mike Kelly46272802019-08-14 17:00:48 +01002198 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2199 }
2200 return Fail("%s: ProcessActivation failed", __func__);
2201}
2202
2203template<typename HalPolicy,
2204 typename Operation = typename HalPolicy::Operation,
2205 typename Model = typename HalPolicy::Model>
2206bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2207{
2208 using Operand = typename HalPolicy::Operand;
2209
2210 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2211 if (!input.IsValid())
2212 {
2213 return Fail("%s: Operation has invalid inputs", __func__);
2214 }
2215
2216 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2217 if (!outputOperand)
2218 {
2219 return Fail("%s: Operation has invalid outputs", __func__);
2220 }
2221
2222 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2223 if (IsDynamicTensor(outputInfo))
2224 {
2225 return Fail("%s: Dynamic output tensors are not supported", __func__);
2226 }
2227
2228 bool isSupported = false;
2229 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2230 IsFloorSupported,
2231 data.m_Backends,
2232 isSupported,
2233 input.GetTensorInfo(),
2234 outputInfo);
2235 if (!isSupported)
2236 {
2237 return false;
2238 }
2239
2240 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2241 assert(layer != nullptr);
2242 input.Connect(layer->GetInputSlot(0));
2243
2244 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2245}
2246
/// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
///
/// Input 0 is the data tensor (flattened to 2D via a reshape layer when its rank > 2),
/// input 1 the 2D constant weights, input 2 the 1D constant bias, and input 3 the fused
/// activation function.
///
/// @return true on success; false (via Fail) otherwise.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2D shape the input must be flattened to so that it matches the
    // weights; FlattenFullyConnectedInput throws if the shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Backend support is queried against the flattened (2D) input shape.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a reshape layer to flatten the input down to 2D before the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2353
2354template<typename HalPolicy,
2355 typename Operation = typename HalPolicy::Operation,
2356 typename Model = typename HalPolicy::Model>
2357bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2358{
Mike Kelly999e2092019-08-15 10:46:46 +01002359 if (operation.inputs.size() != 1)
2360 {
2361 return Fail("%s: Optional inputs are not supported", __func__);
2362 }
2363
Mike Kelly46272802019-08-14 17:00:48 +01002364 using Operand = typename HalPolicy::Operand;
2365
2366 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2367 if (!input.IsValid())
2368 {
2369 return Fail("%s: Operation has invalid inputs", __func__);
2370 }
2371
2372 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2373 if (!output)
2374 {
2375 return Fail("%s: Could not read output 0", __func__);
2376 }
2377
2378 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2379 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2380
2381 if (IsDynamicTensor(outputInfo))
2382 {
2383 return Fail("%s: Dynamic output tensors are not supported", __func__);
2384 }
2385 if (outputInfo.GetNumDimensions() != 4u)
2386 {
2387 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2388 }
2389
2390 armnn::L2NormalizationDescriptor desc;
2391 desc.m_DataLayout = armnn::DataLayout::NHWC;
2392
2393 bool isSupported = false;
2394 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2395 IsL2NormalizationSupported,
2396 data.m_Backends,
2397 isSupported,
2398 inputInfo,
2399 outputInfo,
2400 desc);
2401 if (!isSupported)
2402 {
2403 return false;
2404 }
2405
2406 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2407 assert(layer != nullptr);
2408 input.Connect(layer->GetInputSlot(0));
2409
2410 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2411}
2412
2413template<typename HalPolicy,
2414 typename Operation = typename HalPolicy::Operation,
2415 typename Model = typename HalPolicy::Model>
2416bool ConvertLocalResponseNormalization(const Operation& operation,
2417 const Model& model,
2418 ConversionData& data)
2419{
Mike Kelly999e2092019-08-15 10:46:46 +01002420 if (operation.inputs.size() != 5)
2421 {
2422 return Fail("%s: Optional inputs are not supported", __func__);
2423 }
2424
Mike Kelly46272802019-08-14 17:00:48 +01002425 using Operand = typename HalPolicy::Operand;
2426 using OperandType = typename HalPolicy::OperandType;
2427
2428 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2429 if (!input.IsValid())
2430 {
2431 return Fail("%s: Operation has invalid inputs", __func__);
2432 }
2433
2434 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2435 if (!output)
2436 {
2437 return Fail("%s: Could not read output 0", __func__);
2438 }
2439
2440 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2441 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2442
2443 if (IsDynamicTensor(outputInfo))
2444 {
2445 return Fail("%s: Dynamic output tensors are not supported", __func__);
2446 }
2447 if (outputInfo.GetNumDimensions() != 4u)
2448 {
2449 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2450 }
2451
2452 armnn::NormalizationDescriptor descriptor;
2453 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2454 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2455 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2456
2457 if (!input.IsValid() ||
2458 !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2459 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2460 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2461 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2462 {
2463 return Fail("%s: Operation has invalid inputs", __func__);
2464 }
2465
2466 // ArmNN expects normSize to be the full size of the normalization
2467 // window rather than the radius as in AndroidNN.
2468 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2469
2470 bool isSupported = false;
2471 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2472 IsNormalizationSupported,
2473 data.m_Backends,
2474 isSupported,
2475 inputInfo,
2476 outputInfo,
2477 descriptor);
2478 if (!isSupported)
2479 {
2480 return false;
2481 }
2482
2483
2484 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2485 assert(layer != nullptr);
2486 input.Connect(layer->GetInputSlot(0));
2487
2488 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2489}
2490
2491template<typename HalPolicy,
2492 typename Operation = typename HalPolicy::Operation,
2493 typename Model = typename HalPolicy::Model>
2494bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2495{
2496 using Operand = typename HalPolicy::Operand;
2497
2498 armnn::ActivationDescriptor desc;
2499 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2500
2501 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2502}
2503
2504template<typename HalPolicy,
2505 typename Operation = typename HalPolicy::Operation,
2506 typename Model = typename HalPolicy::Model>
2507bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
2508{
2509 using Operand = typename HalPolicy::Operand;
2510
2511 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2512 if (!input.IsValid())
2513 {
2514 return Fail("%s: Operation has invalid inputs", __func__);
2515 }
2516
2517 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2518 if (!output)
2519 {
2520 return Fail("%s: Could not read output 0", __func__);
2521 }
2522
2523 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2524 if (IsDynamicTensor(outputInfo))
2525 {
2526 return Fail("%s: Dynamic output tensors are not supported", __func__);
2527 }
2528
2529 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2530 if (!axisOperand)
2531 {
2532 return Fail("%s: Could not read input 1", __func__);
2533 }
2534
2535 std::vector<int32_t> axis;
2536 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
2537 {
2538 return Fail("%s: Input 1 has invalid values", __func__);
2539 }
2540
2541 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2542
2543 // Convert the axis to unsigned int and remove duplicates.
2544 unsigned int rank = inputInfo.GetNumDimensions();
2545 std::set<unsigned int> uniqueAxis;
2546 std::transform(axis.begin(), axis.end(),
2547 std::inserter(uniqueAxis, uniqueAxis.begin()),
2548 [rank](int i) -> unsigned int { return (i + rank) % rank; });
2549
2550 // Get the "keep dims" flag.
2551 int32_t keepDims = 0;
2552 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
2553 {
2554 return Fail("%s: Could not read input 2", __func__);
2555 }
2556
2557 armnn::MeanDescriptor descriptor;
2558 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2559 descriptor.m_KeepDims = keepDims > 0;
2560
2561 bool isSupported = false;
2562 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2563 IsMeanSupported,
2564 data.m_Backends,
2565 isSupported,
2566 inputInfo,
2567 outputInfo,
2568 descriptor);
2569 if (!isSupported)
2570 {
2571 return false;
2572 }
2573
2574 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
2575 assert(layer != nullptr);
2576 input.Connect(layer->GetInputSlot(0));
2577
2578 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2579}
2580
2581template<typename HalPolicy,
2582 typename Operation = typename HalPolicy::Operation,
2583 typename Model = typename HalPolicy::Model>
2584bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2585{
2586 using Operand = typename HalPolicy::Operand;
2587
2588 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2589 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2590
2591 if (!input0.IsValid() || !input1.IsValid())
2592 {
2593 return Fail("%s: Operation has invalid inputs", __func__);
2594 }
2595
2596 // The FuseActivation parameter is always the input index 2
2597 // and it should be optional
2598 ActivationFn activationFunction;
2599 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2600 {
2601 return Fail("%s: Operation has invalid inputs", __func__);
2602 }
2603
2604 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2605
2606 if (outputOperand == nullptr)
2607 {
2608 return false;
2609 }
2610
2611 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2612 if (IsDynamicTensor(outputInfo))
2613 {
2614 return Fail("%s: Dynamic output tensors are not supported", __func__);
2615 }
2616
2617 bool isSupported = false;
2618 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2619 IsMultiplicationSupported,
2620 data.m_Backends,
2621 isSupported,
2622 input0.GetTensorInfo(),
2623 input1.GetTensorInfo(),
2624 outputInfo);
2625 if (!isSupported)
2626 {
2627 return false;
2628 }
2629
2630 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2631 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2632
2633 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2634 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2635
2636 if (endLayer != nullptr)
2637 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002638 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2639 if (!isReshapeSupported)
2640 {
2641 return false;
2642 }
2643
Mike Kelly46272802019-08-14 17:00:48 +01002644 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2645 }
2646 else
2647 {
2648 return Fail("%s: ProcessActivation failed", __func__);
2649 }
2650}
2651
2652template<typename HalPolicy,
2653 typename Operation = typename HalPolicy::Operation,
2654 typename Model = typename HalPolicy::Model>
2655bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
2656{
2657 using Operand = typename HalPolicy::Operand;
2658
Mike Kelly3c673942019-07-25 09:26:06 +01002659 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2660 if (!input.IsValid())
2661 {
2662 return Fail("%s: Operation has invalid inputs", __func__);
2663 }
2664
2665 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2666 unsigned int rank = inputInfo.GetNumDimensions();
2667
2668 armnn::PadDescriptor descriptor;
2669 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2670 {
2671 return Fail("%s: Could not convert paddings", __func__);
2672 }
2673
2674 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2675 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2676 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2677 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2678 {
2679 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2680 }
2681
Mike Kelly46272802019-08-14 17:00:48 +01002682 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002683 if (!output)
2684 {
2685 return Fail("%s: Could not read output", __func__);
2686 }
2687
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002688 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002689 if (IsDynamicTensor(outputInfo))
2690 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002691 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002692 }
2693
2694 bool isSupported = false;
2695 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2696 IsPadSupported,
2697 data.m_Backends,
2698 isSupported,
2699 inputInfo,
2700 outputInfo,
2701 descriptor);
2702 if (!isSupported)
2703 {
2704 return false;
2705 }
2706
2707 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2708 assert(layer != nullptr);
2709 input.Connect(layer->GetInputSlot(0));
2710 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2711
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002712 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002713}
2714
Mike Kelly0a879362019-07-29 16:56:31 +01002715template<typename HalPolicy,
2716 typename Operation = typename HalPolicy::Operation,
Mike Kelly46272802019-08-14 17:00:48 +01002717 typename Model = typename HalPolicy::Model>
2718bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
2719{
2720 using Operand = typename HalPolicy::Operand;
2721
2722 const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
2723 const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2724 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2725
2726 if (inputOperand == nullptr
2727 || requestedShapeOperand == nullptr
2728 || outputOperand == nullptr)
2729 {
2730 return Fail("%s: Operation has invalid inputs", __func__);
2731 }
2732
2733 if (requestedShapeOperand->dimensions.size() != 1)
2734 {
2735 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
2736 __func__, requestedShapeOperand->dimensions.size());
2737 }
2738
2739 std::vector<int32_t> targetDimensions;
2740 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
2741 {
2742 return Fail("%s: Could not read values of input 1", __func__);
2743 }
2744
2745 const Shape inputOperandShape = GetOperandShape(*inputOperand);
2746
2747 Shape requestedShape;
2748 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
2749 // function that resolves these values into a fully specified tensor shape.
2750 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
2751 {
2752 return Fail("%s: Failed to resolve the requested shape", __func__);
2753 }
2754
2755 const Shape outputOperandShape = GetOperandShape(*outputOperand);
2756 if (!SameShape(requestedShape, outputOperandShape))
2757 {
2758 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
2759 }
2760
2761 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2762 if (!input.IsValid())
2763 {
2764 return Fail("%s: Could not read input 0", __func__);
2765 }
2766
2767 armnn::ReshapeDescriptor reshapeDescriptor;
2768 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
2769 requestedShape.dimensions.data());
2770
2771 bool isSupported = false;
2772 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2773 IsReshapeSupported,
2774 data.m_Backends,
2775 isSupported,
2776 input.GetTensorInfo(),
2777 reshapeDescriptor);
2778 if (!isSupported)
2779 {
2780 return false;
2781 }
2782
2783 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2784 assert(layer != nullptr);
2785 input.Connect(layer->GetInputSlot(0));
2786
2787 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2788}
2789
2790template<typename HalPolicy,
2791 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002792 typename Model = typename HalPolicy::Model>
2793bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2794{
Mike Kelly46272802019-08-14 17:00:48 +01002795 using Operand = typename HalPolicy::Operand;
2796
Mike Kelly0a879362019-07-29 16:56:31 +01002797 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2798 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2799
2800 if (!input0.IsValid() || !input1.IsValid())
2801 {
2802 return Fail("%s: Operation has invalid inputs", __func__);
2803 }
2804
2805 // The FuseActivation parameter is always the input index 2
2806 // and it should be optional
2807 ActivationFn activationFunction;
2808 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2809 {
2810 return Fail("%s: Operation has invalid inputs", __func__);
2811 }
2812
2813 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2814 if (!output)
2815 {
2816 return Fail("%s: Could not read output 0", __func__);
2817 }
2818
2819 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2820 if (IsDynamicTensor(outputInfo))
2821 {
2822 return Fail("%s: Dynamic output tensors are not supported", __func__);
2823 }
2824
2825 bool isSupported = false;
2826 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2827 IsSubtractionSupported,
2828 data.m_Backends,
2829 isSupported,
2830 input0.GetTensorInfo(),
2831 input1.GetTensorInfo(),
2832 outputInfo);
2833 if (!isSupported)
2834 {
2835 return false;
2836 }
2837
2838 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2839 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2840
2841 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2842 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2843
2844 if (endLayer)
2845 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002846 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2847 if (!isReshapeSupported)
2848 {
2849 return false;
2850 }
Mike Kelly0a879362019-07-29 16:56:31 +01002851 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2852 }
2853
2854 return Fail("%s: ProcessActivation failed", __func__);
2855}
2856
Finn Williams23b87b32019-07-30 11:44:05 +01002857template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002858 typename Operation = typename HalPolicy::Operation,
2859 typename Model = typename HalPolicy::Model>
2860bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2861{
2862 using Operand = typename HalPolicy::Operand;
2863
2864 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2865 if (!input.IsValid())
2866 {
2867 return Fail("%s: Operation has invalid inputs", __func__);
2868 }
2869
2870 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2871 unsigned int rank = inputInfo.GetNumDimensions();
2872 if (rank > 4)
2873 {
2874 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2875 }
2876
2877 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2878 if (!output)
2879 {
2880 return Fail("%s: Could not read output 0", __func__);
2881 }
2882
2883 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
2884 {
2885 return Fail("%s: Dynamic output tensors are not supported", __func__);
2886 }
2887
2888 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
2889 // if the operand index is out of bounds.
2890 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2891
2892 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2893
2894 std::vector<int32_t> axis;
2895 if (!axisOperand)
2896 {
2897 axis.assign(dimensionSequence,
2898 dimensionSequence + rank);
2899 }
2900 else
2901 {
2902 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
2903 }
2904
2905 std::vector<uint32_t> outputDims;
2906 for (unsigned int i = 0; i < rank; i++)
2907 {
2908 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
2909 auto currentDimension = inputInfo.GetShape()[i];
2910 if (skipSqueeze || currentDimension != 1)
2911 {
2912 outputDims.push_back(currentDimension);
2913 }
2914 }
2915
2916 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
2917
2918 armnn::TensorInfo outputInfo = inputInfo;
2919 outputInfo.SetShape(outShape);
2920
2921 armnn::ReshapeDescriptor reshapeDesc;
2922 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2923
2924 bool isSupported = false;
2925 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2926 IsReshapeSupported,
2927 data.m_Backends,
2928 isSupported,
2929 inputInfo,
2930 reshapeDesc);
2931 if (!isSupported)
2932 {
2933 return false;
2934 }
2935
2936 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
2937 assert(layer != nullptr);
2938 input.Connect(layer->GetInputSlot(0));
2939
2940 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2941}
2942
2943template<typename HalPolicy,
2944 typename Operation = typename HalPolicy::Operation,
2945 typename Model = typename HalPolicy::Model>
2946bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2947{
2948 using Operand = typename HalPolicy::Operand;
2949
2950 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2951 if (!input.IsValid())
2952 {
2953 return Fail("%s: Operation has invalid inputs", __func__);
2954 }
2955
2956 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2957 unsigned int rank = inputInfo.GetNumDimensions();
2958 if (rank > 4)
2959 {
2960 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2961 }
2962
2963 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2964 if (!output)
2965 {
2966 return Fail("%s: Could not read output 0", __func__);
2967 }
2968
2969 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2970 if (IsDynamicTensor(outputInfo))
2971 {
2972 return Fail("%s: Dynamic output tensors are not supported", __func__);
2973 }
2974
2975 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2976 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2977 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
2978
2979 std::vector<int32_t> beginValues;
2980 std::vector<int32_t> endValues;
2981 std::vector<int32_t> stridesValues;
2982
2983 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
2984 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
2985 {
2986 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
2987 {
2988 return false;
2989 }
2990
2991 if (operandValues.size() != rank)
2992 {
2993 return false;
2994 }
2995
2996 return true;
2997 };
2998
2999 if (!ValidateInputOperands(*beginOperand, beginValues)
3000 || !ValidateInputOperands(*endOperand, endValues)
3001 || !ValidateInputOperands(*stridesOperand, stridesValues))
3002 {
3003 return Fail("%s: Operation has invalid input operand", __func__);
3004 }
3005
3006 // Stride cannot have value '0'
3007 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3008 {
3009 return Fail("%s: Stride must be non-zero value.", __func__);
3010 }
3011
3012 armnn::StridedSliceDescriptor descriptor;
3013 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3014 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3015 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3016 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3017
3018 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3019 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3020 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3021 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3022 {
3023 return Fail("%s: Operation has invalid inputs", __func__);
3024 }
3025
3026 bool isSupported = false;
3027 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3028 IsStridedSliceSupported,
3029 data.m_Backends,
3030 isSupported,
3031 inputInfo,
3032 outputInfo,
3033 descriptor);
3034 if (!isSupported)
3035 {
3036 return false;
3037 }
3038
3039 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3040 assert(layer != nullptr);
3041 input.Connect(layer->GetInputSlot(0));
3042
3043 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3044}
3045
3046template<typename HalPolicy,
3047 typename Operation = typename HalPolicy::Operation,
3048 typename Model = typename HalPolicy::Model>
3049bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
3050{
3051 using Operand = typename HalPolicy::Operand;
3052
3053 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3054 if (!input.IsValid())
3055 {
3056 return Fail("%s: Operation has invalid inputs", __func__);
3057 }
3058
3059 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3060 unsigned int rank = inputInfo.GetNumDimensions();
3061 if (rank > 4)
3062 {
3063 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3064 }
3065
3066 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3067 // if the operand index is out of bounds.
3068 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3069
3070 std::vector<int32_t> perm(rank);
3071 if (!permOperand)
3072 {
3073 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3074 for (unsigned int i = rank; i > 0; i--)
3075 {
3076 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3077 }
3078 }
3079 else
3080 {
3081 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3082 }
3083
3084 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3085
3086 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3087 if (!permutationVector.IsEqual(NHWCToArmNN)
3088 && !permutationVector.IsEqual(ArmNNToNHWC)
3089 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3090 {
3091 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3092 }
3093
3094 armnn::PermuteDescriptor permuteDesc;
3095 permuteDesc.m_DimMappings = permutationVector;
3096
3097 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3098 if (!output)
3099 {
3100 return Fail("%s: Could not read output 0", __func__);
3101 }
3102
3103 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3104
3105 bool isSupported = false;
3106 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3107 IsPermuteSupported,
3108 data.m_Backends,
3109 isSupported,
3110 inputInfo,
3111 outputInfo,
3112 permuteDesc);
3113 if (!isSupported)
3114 {
3115 return false;
3116 }
3117
3118 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3119 assert(layer != nullptr);
3120 input.Connect(layer->GetInputSlot(0));
3121
3122 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3123}
3124
3125template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003126 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003127 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003128 typename HalModel = typename HalPolicy::Model>
3129bool ConvertBatchToSpaceNd(const HalOperation& operation,
3130 const HalModel& model,
3131 ConversionData& data)
3132{
Finn Williams23b87b32019-07-30 11:44:05 +01003133
3134 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3135 if (!input.IsValid())
3136 {
3137 return Fail("%s: Operation has invalid inputs", __func__);
3138 }
3139
3140 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3141 if (!output)
3142 {
3143 return Fail("%s: Could not read output 0", __func__);
3144 }
3145
3146 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3147 if (IsDynamicTensor(outputInfo))
3148 {
3149 return Fail("%s: Dynamic output tensors are not supported", __func__);
3150 }
3151
3152 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3153 if (!blockOperand)
3154 {
3155 return Fail("%s: Could not read input 1", __func__);
3156 }
3157
3158 // Convert the block operand to int32
3159 std::vector<int32_t> block;
3160 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3161 {
3162 return Fail("%s: Input 1 has invalid values", __func__);
3163 }
3164
3165 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3166
3167 unsigned int rank = inputInfo.GetNumDimensions();
3168 if (rank != 4)
3169 {
3170 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3171 }
3172
3173 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3174 {
3175 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3176 " greater than or equal to 1", __func__);
3177 }
3178
3179 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3180 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3181 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3182
3183 if (Is12Operand(*output))
3184 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003185 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003186 }
3187 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3188 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3189
3190 bool isSupported = false;
3191 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3192 IsBatchToSpaceNdSupported,
3193 data.m_Backends,
3194 isSupported,
3195 inputInfo,
3196 outputInfo,
3197 batchToSpaceNdDesc);
3198 if (!isSupported)
3199 {
3200 return false;
3201 }
3202
3203 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3204 assert(layer != nullptr);
3205 input.Connect(layer->GetInputSlot(0));
3206
3207 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3208}
Mike Kelly0a879362019-07-29 16:56:31 +01003209
Finn Williamsd74c5052019-07-30 17:06:00 +01003210template<typename HalPolicy,
3211 typename HalOperation = typename HalPolicy::Operation,
3212 typename HalOperand = typename HalPolicy::Operand,
3213 typename HalModel = typename HalPolicy::Model>
3214bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3215{
3216 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3217 if (!input.IsValid())
3218 {
3219 return Fail("%s: Operation has invalid inputs", __func__);
3220 }
3221
3222 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3223 unsigned int rank = inputInfo.GetNumDimensions();
3224 unsigned int spatialDim = rank - 2;
3225
3226 if (rank != 4)
3227 {
3228 Fail("%s: Only inputs with rank 4 are supported", __func__);
3229 }
3230
3231 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3232 if (!output)
3233 {
3234 return Fail("%s: Could not read output 0", __func__);
3235 }
3236
3237 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3238 if (IsDynamicTensor(outputInfo))
3239 {
3240 return Fail("%s: Dynamic output tensors are not supported", __func__);
3241 }
3242
3243 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3244 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3245
3246 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3247 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3248 {
3249 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3250 }
3251
3252 std::vector<int32_t> blockShape;
3253 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3254 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3255 {
3256 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3257 }
3258
3259 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3260 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3261 {
3262 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3263 }
3264
3265 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3266 std::vector<int32_t> paddings;
3267 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3268 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3269 {
3270 int paddingBeforeInput = paddings[i];
3271 int paddingAfterInput = paddings[i + 1];
3272 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3273 {
3274 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3275 }
3276
3277 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3278 }
3279
3280 armnn::SpaceToBatchNdDescriptor descriptor;
3281 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3282 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3283 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3284
3285 if (Is12Operand(*output))
3286 {
3287 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3288 }
3289
3290 bool isSupported = false;
3291 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3292 IsSpaceToBatchNdSupported,
3293 data.m_Backends,
3294 isSupported,
3295 inputInfo,
3296 outputInfo,
3297 descriptor);
3298 if (!isSupported)
3299 {
3300 return false;
3301 }
3302
3303 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3304 assert(layer != nullptr);
3305 input.Connect(layer->GetInputSlot(0));
3306
3307 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3308}
3309
Kevin May407718f2019-09-09 14:46:41 +01003310template<typename HalPolicy,
3311 typename HalOperation = typename HalPolicy::Operation,
3312 typename HalModel = typename HalPolicy::Model>
3313bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3314{
3315 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3316
3317 if (!input.IsValid())
3318 {
3319 return Fail("%s: Operation has invalid input", __func__);
3320 }
3321
3322 using HalOperand = typename HalPolicy::Operand;
3323 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3324 if (!output)
3325 {
3326 return Fail("%s: Could not read output 0", __func__);
3327 }
3328
3329 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3330 if (IsDynamicTensor(outputInfo))
3331 {
3332 return Fail("%s: Dynamic output tensors are not supported", __func__);
3333 }
3334
3335 bool isSupported = false;
3336 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3337 IsAbsSupported,
3338 data.m_Backends,
3339 isSupported,
3340 input.GetTensorInfo(),
3341 outputInfo);
3342
3343 if (!isSupported)
3344 {
3345 return false;
3346 }
3347
3348 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3349 assert(layer != nullptr);
3350 input.Connect(layer->GetInputSlot(0));
3351
3352 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3353}
3354
3355
saoste01b8471482018-10-10 09:44:51 +01003356} // namespace armnn_driver