blob: e4ac4a5a79a9fd7010a9a7b8f5dacc4d9dc8f563 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// State shared across the conversion of a single Android NN model:
// the candidate backends, the ArmNN network under construction, the
// output slot created for each operand, and the memory pools backing
// constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends to try, in priority order.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network being built up during conversion.
    armnn::INetworkPtr m_Network;
    // Output slot produced for each operand (presumably indexed by operand id — verify against callers).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools that back CONSTANT_REFERENCE operand values.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps the output slot that produces an operand's value, together with the
// operand's tensor info. A handle may be invalid (e.g. the operand could not
// be resolved); check IsValid() before use.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // Whether this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects this handle's producing slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Tensor info of the wrapped operand.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Pins a constant tensor's data (and optionally an owned, permuted copy of it)
// for the lifetime of the conversion. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data when pinning (identity leaves the data in place).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string; args are forwarded unchanged.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Emit to the Android debug log and report failure to the caller.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Tries each backend in order and sets 'supported' to true for the first one whose
// ILayerSupport::func accepts the layer; logs the backend's reason string otherwise.
// Unregistered backends are logged and skipped. An InvalidArgumentException from the
// support check is rethrown with added context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Mike Kellyb5fdf382019-06-11 16:35:25 +0100169template<typename Operand>
170armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true for the HAL 1.2 operand types the driver can represent as tensors.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
195
// HAL 1.0 operands can never be of type BOOL.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// A HAL 1.0 operand is never a 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
205
#ifdef ARMNN_ANDROID_NN_V1_2

// Checks whether a HAL 1.2 operand holds a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
220
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100221template<typename LayerHandleType>
222armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
223 armnn::TensorInfo reshapeInfo)
224{
225 armnn::ReshapeDescriptor reshapeDescriptor;
226 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
227
228 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
229 BOOST_ASSERT(reshapeLayer != nullptr);
230
231 // Attach the input layer to the reshape layer
232 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
233 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
234
235 return *reshapeLayer;
236}
237
// Connects two inputs to a binary layer, inserting a reshape in front of the
// lower-rank input (padding its shape with leading 1s) so both inputs have the
// same rank and can broadcast.
// @return false if the required reshape is not supported by any backend, true otherwise.
bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Build the padded shape: leading dimensions are 1, trailing ones copy the small shape.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
327
328void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
329 android::nn::PaddingScheme scheme)
330{
331 int32_t padHead;
332 int32_t padTail;
333 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
334 outPadHead = boost::numeric_cast<uint32_t>(padHead);
335 outPadTail = boost::numeric_cast<uint32_t>(padTail);
336}
337
#ifdef ARMNN_ANDROID_NN_V1_2

// Computes explicit head/tail padding for one dimension, taking the kernel
// dilation factor (HAL 1.2) into account.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Computes explicit padding for a transpose convolution; note the padding here
// is derived from the *output* size and may be negative, hence the signed outputs.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
357
Matthew Bentham912b3622019-05-03 15:49:14 +0100358Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100359{
360 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100361 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100362 shape.dimensions = operand.dimensions;
363 shape.scale = operand.scale;
364 shape.offset = operand.zeroPoint;
365 return shape;
366}
367
#ifdef ARMNN_ANDROID_NN_V1_2

// Converts a HAL 1.2 operand into the NN runtime's Shape representation.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
381
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Within 1% of the expected value: silently snap the bias scale to it.
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
        // Larger mismatches are left unchanged and will be rejected later by ArmNN.
    }
}
400
// 4D Tensor Permutations
// Used by the swizzle helpers below to move tensors between the Android NN
// NHWC layout and the layout ArmNN layers expect.
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
// RotateTensorLeft/Right cycle the three dimensions in opposite directions;
// used when permuting 3D tensors for concatenation.
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
411
412template<typename OSlot>
413armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
414 const armnn::PermutationVector& mappings)
415{
416 // Add swizzle layer
417 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
418
419 BOOST_ASSERT(layer != nullptr);
420
421 // Connect input to swizzle layer
422 input.Connect(layer->GetInputSlot(0));
423
424 // Setup swizzled output
425 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
426 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
427
428 return *layer;
429}
430
431void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
432{
433 // Add swizzle layer
434 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
435 // Connect swizzled input to layer
436 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
437}
438
439armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
440{
441 // Add deswizzle layer
442 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
443 return deswizzleLayer;
444}
445
446// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
447armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
448 LayerInputHandle& input,
449 armnn::IConnectableLayer& firstLayer,
450 armnn::IConnectableLayer& lastLayer)
451{
452 SwizzleIn(network, input, firstLayer, 0);
453 return DeswizzleOut(network, lastLayer, 0);
454}
455
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Single-layer convenience overload: the same layer is both first and last.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
462
463bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
464 const armnn::TensorShape & outputShape,
465 uint32_t concatDim)
466{
467 // Validate the output shape is correct given the input shapes (which have just been validated)
468 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
469 if (outputShape.GetNumDimensions() != numDimensions)
470 {
471 return Fail("%s: Output shape has wrong number of dimensions", __func__);
472 }
473
474 unsigned int outputSizeAlongConcatenatedDimension = 0;
475 for (unsigned int i = 0; i < inputShapes.size(); i++)
476 {
477 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
478 }
479
480 for (unsigned int i = 0; i < numDimensions; ++i)
481 {
482 if (i == concatDim)
483 {
484 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
485 {
486 return Fail(
487 "%s: Invalid output shape for dimension %d (%d != %d)",
488 __func__,
489 i,
490 outputShape[i],
491 outputSizeAlongConcatenatedDimension);
492 }
493 }
494 else
495 {
496 if (outputShape[i] != inputShapes[0][i])
497 {
498 return Fail("%s: Invalid output shape", __func__);
499 }
500 }
501 }
502
503 return true;
504}
505
// Tensors with fewer than 3 dimensions must be reshaped before concatenation.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
510
arovir01b0717b52018-09-05 17:03:25 +0100511void SwizzleInputs(armnn::INetwork& network,
512 std::vector<LayerInputHandle>& inputs,
513 std::vector<armnn::TensorShape>& inputShapes,
514 const armnn::PermutationVector& mapping)
515{
516 if (!mapping.IsEqual(IdentityPermutation4D))
517 {
518 size_t nInputs = inputs.size();
519 for (size_t i=0; i<nInputs; ++i)
520 {
521 // add swizzle layer
522 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
523 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
524 auto& outputInfo = outputSlot.GetTensorInfo();
525 // replace inputs with the swizzled ones
526 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
527 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
528 }
529 }
530}
531
narpra01f176d5a2018-11-18 20:17:48 +0000532bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
533 int32_t & concatDimension,
534 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100535{
narpra01f176d5a2018-11-18 20:17:48 +0000536 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100537 BOOST_ASSERT(numberOfDimensions >= 3);
538
539 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000540 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
541 // or along dimension 0 or 2 for a 3-D tensor.
542 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100543 {
narpra01f176d5a2018-11-18 20:17:48 +0000544 concatDimension = 1;
545 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
546 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100547 }
narpra01f176d5a2018-11-18 20:17:48 +0000548 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100549 {
narpra01f176d5a2018-11-18 20:17:48 +0000550 concatDimension = 0;
551 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
552 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100553 }
narpra01f176d5a2018-11-18 20:17:48 +0000554 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100555}
556
557} // anonymous namespace
558
559namespace armnn_driver
560{
561
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
572
573} // namespace armnn_driver
574
575///
576/// Utility templates
577///
578
579namespace armnn_driver
580{
581
582using namespace android::nn;
583
// Retrieves the operand backing input number 'inputIndex' of an operation,
// or nullptr if the index is out of range.
// @param failOnIndexOutOfBounds when true, an out-of-range index is logged via Fail();
//        when false the nullptr is returned silently.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // Cast explicitly: passing a size_t through printf-style varargs for "%i"
            // is undefined behaviour on LP64 targets (size_t is 64-bit, int is 32-bit).
            Fail("%s: invalid input index: %i out of %i",
                 __func__, static_cast<int>(inputIndex), static_cast<int>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
605
// Retrieves the operand backing output number 'outputIndex' of an operation,
// or nullptr (after logging) if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // Cast explicitly: passing a size_t through printf-style varargs for "%i"
        // is undefined behaviour on LP64 targets.
        Fail("%s: invalid output index: %i out of %i",
             __func__, static_cast<int>(outputIndex), static_cast<int>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
625
// Returns a read-only pointer to an operand's constant data, resolved according
// to the operand's lifetime:
//   CONSTANT_COPY      -> data embedded in model.operandValues
//   CONSTANT_REFERENCE -> data in one of the conversion's memory pools
//   NO_VALUE           -> nullptr; only an error if 'optional' is false
// Any other lifetime (e.g. a model input) is logged and yields nullptr.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // Mandatory operand with no value: fall through to the error path below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
672
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100673template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100674 typename HalOperation = typename HalPolicy::Operation,
675 typename HalModel = typename HalPolicy::Model,
676 typename HalOperandType = typename HalPolicy::OperandType>
677bool GetOperandType(const HalOperation& operation,
678 uint32_t inputIndex,
679 const HalModel& model,
680 HalOperandType& type)
681{
682 using HalOperand = typename HalPolicy::Operand;
683
684 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
685 if (!operand)
686 {
687 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
688 }
689
690 type = operand->type;
691 return true;
692}
693
// Pins an operand's constant data as a ConstTensorPin.
// @param dimensionMappings   Optional permutation applied to the data when pinning.
// @param overrideTensorShape When non-null, replaces the shape derived from the operand.
// @param optional            When true, a missing value yields a valid-but-optional pin
//                            instead of an error.
// Returns an invalid pin (with a logged reason) on any failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    // The operand must have a type we can represent as a tensor.
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Unless optional, only constant lifetimes (or NO_VALUE) are acceptable here.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
741
742template<typename HalPolicy,
743 typename HalOperation = typename HalPolicy::Operation,
744 typename HalModel = typename HalPolicy::Model>
745ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
746 uint32_t inputIndex,
747 const HalModel& model,
748 const ConversionData& data,
749 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
750 const armnn::TensorShape* overrideTensorShape = nullptr,
751 bool optional = false)
752{
753 using HalOperand = typename HalPolicy::Operand;
754
755 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
756 if (!operand)
757 {
758 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
759 return ConstTensorPin();
760 }
761 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
762 model,
763 data,
764 dimensionMappings,
765 overrideTensorShape,
766 optional);
767}
768
769template<typename HalPolicy,
770 typename OutputType,
771 typename HalOperandType = typename HalPolicy::OperandType,
772 typename HalOperation = typename HalPolicy::Operation,
773 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100774bool GetInputScalar(const HalOperation& operation,
775 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100776 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100777 OutputType& outValue,
778 const HalModel& model,
779 const ConversionData& data)
780{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100781 using HalOperand = typename HalPolicy::Operand;
782
783 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100784 if (!operand)
785 {
786 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
787 }
788
789 if (operand->type != type)
790 {
791 return Fail("%s: unexpected operand type: %s (should be %s)",
792 __func__, toString(operand->type).c_str(), toString(type).c_str());
793 }
794
795 if (operand->location.length != sizeof(OutputType))
796 {
797 return Fail("%s: incorrect operand location length: %i (should be %i)",
798 __func__, operand->location.length, sizeof(OutputType));
799 }
800
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100801 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100802 if (!valueAddress)
803 {
804 return Fail("%s: failed to get address for operand", __func__);
805 }
806
807 outValue = *(static_cast<const OutputType*>(valueAddress));
808 return true;
809}
810
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100811template<typename HalPolicy,
812 typename HalOperation = typename HalPolicy::Operation,
813 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100814bool GetInputInt32(const HalOperation& operation,
815 uint32_t inputIndex,
816 int32_t& outValue,
817 const HalModel& model,
818 const ConversionData& data)
819{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100820 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100821}
822
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100823template<typename HalPolicy,
824 typename HalOperation = typename HalPolicy::Operation,
825 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100826bool GetInputFloat32(const HalOperation& operation,
827 uint32_t inputIndex,
828 float& outValue,
829 const HalModel& model,
830 const ConversionData& data)
831{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100832 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100833}
834
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100835template<typename HalPolicy,
836 typename HalOperation = typename HalPolicy::Operation,
837 typename HalOperandType = typename HalPolicy::OperandType,
838 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100839bool GetInputActivationFunctionImpl(const HalOperation& operation,
840 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100841 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100842 ActivationFn& outActivationFunction,
843 const HalModel& model,
844 const ConversionData& data)
845{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100846 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100847 {
848 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
849 __func__,
850 toString(type).c_str(),
851 toString(OperandType::INT32).c_str(),
852 toString(OperandType::TENSOR_INT32).c_str());
853 }
854
855 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100856 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100857 {
858 return Fail("%s: failed to get activation input value", __func__);
859 }
860 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
861 return true;
862}
863
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100864template<typename HalPolicy,
865 typename HalOperation = typename HalPolicy::Operation,
866 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100867bool GetInputActivationFunction(const HalOperation& operation,
868 uint32_t inputIndex,
869 ActivationFn& outActivationFunction,
870 const HalModel& model,
871 const ConversionData& data)
872{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100873 return GetInputActivationFunctionImpl<HalPolicy>(operation,
874 inputIndex,
875 HalPolicy::OperandType::INT32,
876 outActivationFunction,
877 model,
878 data);
arovir01b0717b52018-09-05 17:03:25 +0100879}
880
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100881template<typename HalPolicy,
882 typename HalOperation = typename HalPolicy::Operation,
883 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100884bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
885 uint32_t inputIndex,
886 ActivationFn& outActivationFunction,
887 const HalModel& model,
888 const ConversionData& data)
889{
890 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100891 return GetInputActivationFunctionImpl<HalPolicy>(operation,
892 inputIndex,
893 HalPolicy::OperandType::INT32,
894 outActivationFunction,
895 model,
896 data);
arovir01b0717b52018-09-05 17:03:25 +0100897}
898
899
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100900template<typename HalPolicy,
901 typename HalOperation = typename HalPolicy::Operation,
902 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100903bool GetOptionalInputActivation(const HalOperation& operation,
904 uint32_t inputIndex,
905 ActivationFn& activationFunction,
906 const HalModel& model,
907 const ConversionData& data)
908{
909 if (operation.inputs.size() <= inputIndex)
910 {
911 activationFunction = ActivationFn::kActivationNone;
912 }
913 else
914 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100915 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100916 {
917 return Fail("%s: Operation has invalid inputs", __func__);
918 }
919 }
920 return true;
921}
922
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100923template<typename HalPolicy,
924 typename ConvolutionDescriptor,
925 typename HalOperation = typename HalPolicy::Operation,
926 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100927bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
928 uint32_t dilationXIndex,
929 ConvolutionDescriptor& descriptor,
930 const HalModel& model,
931 const ConversionData& data)
932{
933 bool success = true;
934 if (operation.inputs.size() >= dilationXIndex + 2)
935 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100936 success &= GetInputScalar<HalPolicy>(operation,
937 dilationXIndex,
938 HalPolicy::OperandType::INT32,
939 descriptor.m_DilationX,
940 model,
941 data);
942 success &= GetInputScalar<HalPolicy>(operation,
943 dilationXIndex + 1,
944 HalPolicy::OperandType::INT32,
945 descriptor.m_DilationY,
946 model,
947 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100948 }
949
950 return success;
951}
952
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100953template<typename HalPolicy,
954 typename HalOperand = typename HalPolicy::Operand,
955 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100956bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100957 std::vector<int32_t>& outValues,
958 const HalModel& model,
959 const ConversionData& data)
960{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100961 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100962 {
963 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
964 }
965
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100966 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100967 if (!startAddress)
968 {
969 return Fail("%s: failed to get operand address", __func__, operand.type);
970 }
971
972 // Check number of bytes is sensible
973 const uint32_t numBytes = operand.location.length;
974 if (numBytes % sizeof(int32_t) != 0)
975 {
976 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
977 __func__, numBytes, sizeof(int32_t));
978 }
979
980 outValues.resize(numBytes / sizeof(int32_t));
981 memcpy(outValues.data(), startAddress, numBytes);
982 return true;
983}
984
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100985template<typename HalPolicy,
986 typename HalOperation = typename HalPolicy::Operation,
987 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100988bool GetInputPaddingScheme(const HalOperation& operation,
989 uint32_t inputIndex,
990 PaddingScheme& outPaddingScheme,
991 const HalModel& model,
992 const ConversionData& data)
993{
994 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100995 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100996 {
997 return Fail("%s: failed to get padding scheme input value", __func__);
998 }
999
1000 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1001 return true;
1002}
1003
/// Resolves input <inputIndex> of @p operation to a LayerInputHandle that later
/// conversion code can connect to an ArmNN layer.
///
/// Depending on the operand's lifetime this either:
///  - binds to the output slot previously recorded in data.m_OutputSlotForOperand
///    (MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT), or
///  - materialises a new ArmNN Constant layer (CONSTANT_COPY / CONSTANT_REFERENCE).
///
/// Returns a default-constructed (invalid) LayerInputHandle on any failure;
/// Fail() is called first so the reason is logged.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                // Once validated, a MODEL_INPUT is handled exactly like any other
                // operand that already has an associated output slot.
                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Verify at least one backend accepts a Constant layer of this shape/type.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break; // unreachable: both branches above return
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1116
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001117template<typename HalPolicy,
1118 typename HalOperation = typename HalPolicy::Operation,
1119 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001120bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1121 uint32_t operationOutputIndex,
1122 armnn::IConnectableLayer& layer,
1123 uint32_t layerOutputIndex,
1124 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001125 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001126{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001127 using HalOperand = typename HalPolicy::Operand;
1128
1129 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001130 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1131 {
1132 return false;
1133 }
1134
1135 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1136
1137 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1138 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1139
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001140 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001141
1142 return true;
1143}
1144
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001145template<typename HalPolicy,
1146 typename HalOperation = typename HalPolicy::Operation,
1147 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001148armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1149 uint32_t inputIndex,
1150 const HalModel& model,
1151 ConversionData& data)
1152{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001153 using HalOperand = typename HalPolicy::Operand;
1154
1155 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001156 if (!operand)
1157 {
1158 return armnn::DataLayout::NHWC;
1159 }
1160
1161 if (!IsBool(*operand))
1162 {
1163 return armnn::DataLayout::NHWC;
1164 }
1165
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001166 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001167 if (!valueAddress)
1168 {
1169 return armnn::DataLayout::NHWC;
1170 }
1171
1172 if (*(static_cast<const bool*>(valueAddress)))
1173 {
1174 return armnn::DataLayout::NCHW;
1175 }
1176 else
1177 {
1178 return armnn::DataLayout::NHWC;
1179 }
1180}
1181
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001182template<typename HalPolicy,
1183 typename HalOperation = typename HalPolicy::Operation,
1184 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001185bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1186 uint32_t outputIndex,
1187 armnn::IConnectableLayer& layer,
1188 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001189 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001190{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001191 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1192 outputIndex,
1193 layer,
1194 outputIndex,
1195 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001196 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001197}
1198
/// Shared conversion path for all unary activation operations (RELU, TANH, ...).
/// Validates input 0 and output 0, checks backend support for @p activationDesc,
/// then adds an ArmNN Activation layer and tracks its output slot.
///
/// @param operationName  Human-readable name used in failure messages.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    // Dynamic (unspecified-shape) outputs cannot be converted by this driver.
    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the configured backends whether any of them supports this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1247
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001248template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001249 typename HalOperation = typename HalPolicy::Operation,
1250 typename HalModel = typename HalPolicy::Model>
1251bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1252{
1253 armnn::ActivationDescriptor desc;
1254 desc.m_Function = armnn::ActivationFunction::ReLu;
1255
1256 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1257}
1258
1259template<typename HalPolicy,
1260 typename HalOperation = typename HalPolicy::Operation,
1261 typename HalModel = typename HalPolicy::Model>
1262bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1263{
1264 armnn::ActivationDescriptor desc;
1265 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1266 desc.m_A = 1.0f;
1267 desc.m_B = -1.0f;
1268
1269 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1270}
1271
1272template<typename HalPolicy,
1273 typename HalOperation = typename HalPolicy::Operation,
1274 typename HalModel = typename HalPolicy::Model>
1275bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1276{
1277 armnn::ActivationDescriptor desc;
1278 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1279 desc.m_A = 6.0f;
1280
1281 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1282}
1283
1284template<typename HalPolicy,
1285 typename HalOperation = typename HalPolicy::Operation,
1286 typename HalModel = typename HalPolicy::Model>
1287bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1288{
1289 armnn::ActivationDescriptor desc;
1290 desc.m_Function = armnn::ActivationFunction::TanH;
1291 desc.m_A = 1.0f; // android nn does not support tanH parameters
1292 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1293
1294 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1295}
1296
1297template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001298 typename HalOperation = typename HalPolicy::Operation,
1299 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001300bool ConvertPaddings(const HalOperation& operation,
1301 const HalModel& model,
1302 ConversionData& data,
1303 unsigned int rank,
1304 armnn::PadDescriptor& padDescriptor)
1305{
1306 using HalOperand = typename HalPolicy::Operand;
1307
1308 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1309 if (!paddingsOperand)
1310 {
1311 return Fail("%s: Could not read paddings operand", __func__);
1312 }
1313
1314 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1315 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1316 {
1317 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1318 }
1319
1320 std::vector<int32_t> paddings;
1321 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1322
1323 // add padding for each dimension of input tensor.
1324 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1325 {
1326 int paddingBeforeInput = paddings[i];
1327 int paddingAfterInput = paddings[i + 1];
1328
1329 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1330 {
1331 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1332 }
1333
1334 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1335 }
1336
1337 return true;
1338}
1339
/// Shared conversion path for AVERAGE_POOL_2D / L2_POOL_2D / MAX_POOL_2D.
/// Supports both NNAPI signatures: explicit padding (>= 10 inputs) and
/// implicit padding scheme (7 inputs), each with an optional trailing
/// data-layout operand on HAL 1.2 models.
///
/// @param operationName  Human-readable name used in failure messages.
/// @param poolType       Pooling algorithm to apply (Average/L2/Max).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC; // default; may be overridden below

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 adds an optional data-layout operand at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NOTE(review): dimensions are read as NHWC ([1]=height, [2]=width) and
        // the padding is computed from them BEFORE the optional data-layout
        // operand (index 7) is consulted below - confirm behaviour for NCHW models.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    // Ask the configured backends whether any of them supports this pooling.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1457
/// Converts an Android NN ADD operation into an ArmNN Addition layer with an
/// optional fused activation. Inputs 0 and 1 are the addends; input 2 is the
/// optional fused-activation function.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    // Dynamic (unspecified-shape) outputs cannot be converted by this driver.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the configured backends whether any of them supports this addition.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // BroadcastTensor reconciles mismatched input shapes before connecting
        // them to startLayer; presumably it also performs the connections -
        // verify against its definition.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1527
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
// Converts an Android NN CONCATENATION operation into an ArmNN Concat layer,
// inserting Reshape layers to expand rank-1/rank-2 inputs and Permute layers
// when the requested concat axis is not directly supported by ArmNN.
// Returns true on success; reports the reason via Fail() and returns false otherwise.
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The concat axis is the last input operand (index numInputTensors).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // Taken by value: the shape may be rewritten below if inputs are reshaped/permuted.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalisation the axis must index a valid output dimension.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether low-rank inputs were expanded, and by how many dimensions,
    // so the inverse reshape can be applied to the concat output at the end.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                // Rank 1: prepend two unit dimensions.
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        // NOTE(review): all inputs are assumed to have been expanded by the same amount;
        // tensorDimensionsAdded holds the value from the last reshaped input.
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
            CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
                armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input TensorInfos for the backend query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1765
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer
// (NHWC layout), with the NNAPI fused activation appended via ProcessActivation.
// Supports both the explicit-padding (10 inputs) and implicit-padding (7 inputs)
// HAL signatures. Returns true on success, false (via Fail) otherwise.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keeps the bias quantization scale consistent with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Explicit-padding signature: pad left/right/top/bottom, strides, activation.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit-padding signature: padding scheme, strides, activation.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NNAPI CONV_2D weights are [ depth_out, H, W, depth_in ]; input is NHWC.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // endLayer is the convolution itself or a fused activation layer appended to it.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1885
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001886template<typename HalPolicy,
1887 typename HalOperation = typename HalPolicy::Operation,
1888 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01001889bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
1890{
1891 using HalOperand = typename HalPolicy::Operand;
1892 using HalOperandType = typename HalPolicy::OperandType;
1893
1894 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1895 if (!input.IsValid() )
1896 {
1897 return Fail("%s: Operation has invalid inputs", __func__);
1898 }
1899
1900 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1901 unsigned int rank = inputInfo.GetNumDimensions();
1902 if (rank != 4)
1903 {
1904 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1905 }
1906
1907 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1908 if (!output)
1909 {
1910 return Fail("%s: Could not read output 0", __func__);
1911 }
1912
1913 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1914 if (IsDynamicTensor(outputInfo))
1915 {
1916 return Fail("%s: Dynamic output tensors are not supported", __func__);
1917 }
1918
1919 armnn::DepthToSpaceDescriptor descriptor;
1920
1921 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
1922 if (descriptor.m_BlockSize <= 1)
1923 {
1924 return Fail("%s: Block size must be at least 1 in all dimensions");
1925 }
1926
1927 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1928 if (Is12Operand(*output))
1929 {
1930 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
1931 }
1932
1933 bool isSupported = false;
1934 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1935 IsDepthToSpaceSupported,
1936 data.m_Backends,
1937 isSupported,
1938 inputInfo,
1939 outputInfo,
1940 descriptor);
1941 if (!isSupported)
1942 {
1943 return false;
1944 }
1945
1946 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
1947 assert(layer != nullptr);
1948 input.Connect(layer->GetInputSlot(0));
1949
1950 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1951}
1952
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer (NHWC). The NNAPI weight tensor [1, H, W, I*M]
// is reinterpreted as [H, W, I, M] and swizzled to ArmNN's [M, I, H, W] order.
// Supports the explicit-padding (11 inputs) and implicit-padding (8 inputs)
// HAL signatures. Returns true on success, false (via Fail) otherwise.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I is taken from the input's channel dimension; M = (I*M) / I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keeps the bias quantization scale consistent with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // Explicit-padding signature (input 9 is the depth multiplier, read via the
    // weights shape above rather than as a scalar here).
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit-padding signature.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are in [ M, I, H, W ] order after the swizzle above.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // endLayer is the convolution itself or a fused activation layer appended to it.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2097
Mike Kelly3c673942019-07-25 09:26:06 +01002098template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002099 typename Operation = typename HalPolicy::Operation,
2100 typename Model = typename HalPolicy::Model>
2101bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002102{
Mike Kelly46272802019-08-14 17:00:48 +01002103 using Operand = typename HalPolicy::Operand;
2104
2105 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2106 if (!input.IsValid())
2107 {
2108 return Fail("%s: Operation has invalid input", __func__);
2109 }
2110
2111 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2112 if (!outputOperand)
2113 {
2114 return Fail("%s: Operation has invalid outputs", __func__);
2115 }
2116
2117 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2118 if (IsDynamicTensor(outputInfo))
2119 {
2120 return Fail("%s: Dynamic output tensors are not supported", __func__);
2121 }
2122
2123 bool isSupported = false;
2124 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2125 IsDequantizeSupported,
2126 data.m_Backends,
2127 isSupported,
2128 input.GetTensorInfo(),
2129 GetTensorInfoForOperand(*outputOperand));
2130 if (!isSupported)
2131 {
2132 return false;
2133 }
2134
2135 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2136 assert(layer != nullptr);
2137 input.Connect(layer->GetInputSlot(0));
2138
2139 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2140}
2141
2142template<typename HalPolicy,
2143 typename Operation = typename HalPolicy::Operation,
2144 typename Model = typename HalPolicy::Model>
2145bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2146{
2147 using Operand = typename HalPolicy::Operand;
2148
2149 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2150 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2151
2152 if (!input0.IsValid() || !input1.IsValid())
2153 {
2154 return Fail("%s: Operation has invalid inputs", __func__);
2155 }
2156
2157 // The FuseActivation parameter is always the input index 2
2158 // and it should be optional
2159 ActivationFn activationFunction;
2160 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2161 {
2162 return Fail("%s: Operation has invalid inputs", __func__);
2163 }
2164
2165 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2166 if (!output)
2167 {
2168 return Fail("%s: Could not read output 0", __func__);
2169 }
2170
2171 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2172 if (IsDynamicTensor(outputInfo))
2173 {
2174 return Fail("%s: Dynamic output tensors are not supported", __func__);
2175 }
2176
2177 bool isSupported = false;
2178 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2179 IsDivisionSupported,
2180 data.m_Backends,
2181 isSupported,
2182 input0.GetTensorInfo(),
2183 input1.GetTensorInfo(),
2184 outputInfo);
2185 if (!isSupported)
2186 {
2187 return false;
2188 }
2189
2190 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2191 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2192
2193 if (endLayer)
2194 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002195 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2196 if (!isReshapeSupported)
2197 {
2198 return false;
2199 }
2200
Mike Kelly46272802019-08-14 17:00:48 +01002201 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2202 }
2203 return Fail("%s: ProcessActivation failed", __func__);
2204}
2205
2206template<typename HalPolicy,
2207 typename Operation = typename HalPolicy::Operation,
2208 typename Model = typename HalPolicy::Model>
2209bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2210{
2211 using Operand = typename HalPolicy::Operand;
2212
2213 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2214 if (!input.IsValid())
2215 {
2216 return Fail("%s: Operation has invalid inputs", __func__);
2217 }
2218
2219 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2220 if (!outputOperand)
2221 {
2222 return Fail("%s: Operation has invalid outputs", __func__);
2223 }
2224
2225 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2226 if (IsDynamicTensor(outputInfo))
2227 {
2228 return Fail("%s: Dynamic output tensors are not supported", __func__);
2229 }
2230
2231 bool isSupported = false;
2232 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2233 IsFloorSupported,
2234 data.m_Backends,
2235 isSupported,
2236 input.GetTensorInfo(),
2237 outputInfo);
2238 if (!isSupported)
2239 {
2240 return false;
2241 }
2242
2243 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2244 assert(layer != nullptr);
2245 input.Connect(layer->GetInputSlot(0));
2246
2247 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2248}
2249
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected
// layer. Inputs of rank > 2 are flattened to 2-D via an inserted Reshape layer;
// the fused activation (input index 3) is appended via ProcessActivation.
// Returns true on success, false (via Fail) otherwise.
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2-D shape the input must be flattened to; FlattenFullyConnectedInput
    // throws if the input cannot be reconciled with the weights shape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    // NNAPI supplies weights as [num_units, input_size]; ArmNN transposes internally.
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    // endLayer is the FC layer itself or a fused activation layer appended to it.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape to flatten the input to the 2-D shape computed above.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2356
2357template<typename HalPolicy,
2358 typename Operation = typename HalPolicy::Operation,
2359 typename Model = typename HalPolicy::Model>
2360bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2361{
Mike Kelly999e2092019-08-15 10:46:46 +01002362 if (operation.inputs.size() != 1)
2363 {
2364 return Fail("%s: Optional inputs are not supported", __func__);
2365 }
2366
Mike Kelly46272802019-08-14 17:00:48 +01002367 using Operand = typename HalPolicy::Operand;
2368
2369 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2370 if (!input.IsValid())
2371 {
2372 return Fail("%s: Operation has invalid inputs", __func__);
2373 }
2374
2375 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2376 if (!output)
2377 {
2378 return Fail("%s: Could not read output 0", __func__);
2379 }
2380
2381 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2382 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2383
2384 if (IsDynamicTensor(outputInfo))
2385 {
2386 return Fail("%s: Dynamic output tensors are not supported", __func__);
2387 }
2388 if (outputInfo.GetNumDimensions() != 4u)
2389 {
2390 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2391 }
2392
2393 armnn::L2NormalizationDescriptor desc;
2394 desc.m_DataLayout = armnn::DataLayout::NHWC;
2395
2396 bool isSupported = false;
2397 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2398 IsL2NormalizationSupported,
2399 data.m_Backends,
2400 isSupported,
2401 inputInfo,
2402 outputInfo,
2403 desc);
2404 if (!isSupported)
2405 {
2406 return false;
2407 }
2408
2409 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2410 assert(layer != nullptr);
2411 input.Connect(layer->GetInputSlot(0));
2412
2413 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2414}
2415
2416template<typename HalPolicy,
2417 typename Operation = typename HalPolicy::Operation,
2418 typename Model = typename HalPolicy::Model>
2419bool ConvertLocalResponseNormalization(const Operation& operation,
2420 const Model& model,
2421 ConversionData& data)
2422{
Mike Kelly999e2092019-08-15 10:46:46 +01002423 if (operation.inputs.size() != 5)
2424 {
2425 return Fail("%s: Optional inputs are not supported", __func__);
2426 }
2427
Mike Kelly46272802019-08-14 17:00:48 +01002428 using Operand = typename HalPolicy::Operand;
2429 using OperandType = typename HalPolicy::OperandType;
2430
2431 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2432 if (!input.IsValid())
2433 {
2434 return Fail("%s: Operation has invalid inputs", __func__);
2435 }
2436
2437 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2438 if (!output)
2439 {
2440 return Fail("%s: Could not read output 0", __func__);
2441 }
2442
2443 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2444 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2445
2446 if (IsDynamicTensor(outputInfo))
2447 {
2448 return Fail("%s: Dynamic output tensors are not supported", __func__);
2449 }
2450 if (outputInfo.GetNumDimensions() != 4u)
2451 {
2452 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2453 }
2454
2455 armnn::NormalizationDescriptor descriptor;
2456 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2457 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2458 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2459
2460 if (!input.IsValid() ||
2461 !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2462 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2463 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2464 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2465 {
2466 return Fail("%s: Operation has invalid inputs", __func__);
2467 }
2468
2469 // ArmNN expects normSize to be the full size of the normalization
2470 // window rather than the radius as in AndroidNN.
2471 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2472
2473 bool isSupported = false;
2474 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2475 IsNormalizationSupported,
2476 data.m_Backends,
2477 isSupported,
2478 inputInfo,
2479 outputInfo,
2480 descriptor);
2481 if (!isSupported)
2482 {
2483 return false;
2484 }
2485
2486
2487 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2488 assert(layer != nullptr);
2489 input.Connect(layer->GetInputSlot(0));
2490
2491 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2492}
2493
2494template<typename HalPolicy,
2495 typename Operation = typename HalPolicy::Operation,
2496 typename Model = typename HalPolicy::Model>
2497bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2498{
2499 using Operand = typename HalPolicy::Operand;
2500
2501 armnn::ActivationDescriptor desc;
2502 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2503
2504 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2505}
2506
2507template<typename HalPolicy,
2508 typename Operation = typename HalPolicy::Operation,
2509 typename Model = typename HalPolicy::Model>
2510bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
2511{
2512 using Operand = typename HalPolicy::Operand;
2513
2514 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2515 if (!input.IsValid())
2516 {
2517 return Fail("%s: Operation has invalid inputs", __func__);
2518 }
2519
2520 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2521 if (!output)
2522 {
2523 return Fail("%s: Could not read output 0", __func__);
2524 }
2525
2526 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2527 if (IsDynamicTensor(outputInfo))
2528 {
2529 return Fail("%s: Dynamic output tensors are not supported", __func__);
2530 }
2531
2532 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2533 if (!axisOperand)
2534 {
2535 return Fail("%s: Could not read input 1", __func__);
2536 }
2537
2538 std::vector<int32_t> axis;
2539 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
2540 {
2541 return Fail("%s: Input 1 has invalid values", __func__);
2542 }
2543
2544 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2545
2546 // Convert the axis to unsigned int and remove duplicates.
2547 unsigned int rank = inputInfo.GetNumDimensions();
2548 std::set<unsigned int> uniqueAxis;
2549 std::transform(axis.begin(), axis.end(),
2550 std::inserter(uniqueAxis, uniqueAxis.begin()),
2551 [rank](int i) -> unsigned int { return (i + rank) % rank; });
2552
2553 // Get the "keep dims" flag.
2554 int32_t keepDims = 0;
2555 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
2556 {
2557 return Fail("%s: Could not read input 2", __func__);
2558 }
2559
2560 armnn::MeanDescriptor descriptor;
2561 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2562 descriptor.m_KeepDims = keepDims > 0;
2563
2564 bool isSupported = false;
2565 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2566 IsMeanSupported,
2567 data.m_Backends,
2568 isSupported,
2569 inputInfo,
2570 outputInfo,
2571 descriptor);
2572 if (!isSupported)
2573 {
2574 return false;
2575 }
2576
2577 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
2578 assert(layer != nullptr);
2579 input.Connect(layer->GetInputSlot(0));
2580
2581 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2582}
2583
2584template<typename HalPolicy,
2585 typename Operation = typename HalPolicy::Operation,
2586 typename Model = typename HalPolicy::Model>
2587bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2588{
2589 using Operand = typename HalPolicy::Operand;
2590
2591 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2592 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2593
2594 if (!input0.IsValid() || !input1.IsValid())
2595 {
2596 return Fail("%s: Operation has invalid inputs", __func__);
2597 }
2598
2599 // The FuseActivation parameter is always the input index 2
2600 // and it should be optional
2601 ActivationFn activationFunction;
2602 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2603 {
2604 return Fail("%s: Operation has invalid inputs", __func__);
2605 }
2606
2607 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2608
2609 if (outputOperand == nullptr)
2610 {
2611 return false;
2612 }
2613
2614 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2615 if (IsDynamicTensor(outputInfo))
2616 {
2617 return Fail("%s: Dynamic output tensors are not supported", __func__);
2618 }
2619
2620 bool isSupported = false;
2621 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2622 IsMultiplicationSupported,
2623 data.m_Backends,
2624 isSupported,
2625 input0.GetTensorInfo(),
2626 input1.GetTensorInfo(),
2627 outputInfo);
2628 if (!isSupported)
2629 {
2630 return false;
2631 }
2632
2633 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2634 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2635
2636 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2637 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2638
2639 if (endLayer != nullptr)
2640 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002641 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2642 if (!isReshapeSupported)
2643 {
2644 return false;
2645 }
2646
Mike Kelly46272802019-08-14 17:00:48 +01002647 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2648 }
2649 else
2650 {
2651 return Fail("%s: ProcessActivation failed", __func__);
2652 }
2653}
2654
2655template<typename HalPolicy,
2656 typename Operation = typename HalPolicy::Operation,
2657 typename Model = typename HalPolicy::Model>
2658bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
2659{
2660 using Operand = typename HalPolicy::Operand;
2661
Mike Kelly3c673942019-07-25 09:26:06 +01002662 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2663 if (!input.IsValid())
2664 {
2665 return Fail("%s: Operation has invalid inputs", __func__);
2666 }
2667
2668 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2669 unsigned int rank = inputInfo.GetNumDimensions();
2670
2671 armnn::PadDescriptor descriptor;
2672 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2673 {
2674 return Fail("%s: Could not convert paddings", __func__);
2675 }
2676
2677 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2678 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2679 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2680 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2681 {
2682 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2683 }
2684
Mike Kelly46272802019-08-14 17:00:48 +01002685 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002686 if (!output)
2687 {
2688 return Fail("%s: Could not read output", __func__);
2689 }
2690
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002691 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002692 if (IsDynamicTensor(outputInfo))
2693 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002694 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002695 }
2696
2697 bool isSupported = false;
2698 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2699 IsPadSupported,
2700 data.m_Backends,
2701 isSupported,
2702 inputInfo,
2703 outputInfo,
2704 descriptor);
2705 if (!isSupported)
2706 {
2707 return false;
2708 }
2709
2710 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2711 assert(layer != nullptr);
2712 input.Connect(layer->GetInputSlot(0));
2713 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2714
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002715 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002716}
2717
Mike Kelly0a879362019-07-29 16:56:31 +01002718template<typename HalPolicy,
2719 typename Operation = typename HalPolicy::Operation,
Mike Kelly46272802019-08-14 17:00:48 +01002720 typename Model = typename HalPolicy::Model>
2721bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
2722{
2723 using Operand = typename HalPolicy::Operand;
2724
2725 const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
2726 const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2727 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2728
2729 if (inputOperand == nullptr
2730 || requestedShapeOperand == nullptr
2731 || outputOperand == nullptr)
2732 {
2733 return Fail("%s: Operation has invalid inputs", __func__);
2734 }
2735
2736 if (requestedShapeOperand->dimensions.size() != 1)
2737 {
2738 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
2739 __func__, requestedShapeOperand->dimensions.size());
2740 }
2741
2742 std::vector<int32_t> targetDimensions;
2743 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
2744 {
2745 return Fail("%s: Could not read values of input 1", __func__);
2746 }
2747
2748 const Shape inputOperandShape = GetOperandShape(*inputOperand);
2749
2750 Shape requestedShape;
2751 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
2752 // function that resolves these values into a fully specified tensor shape.
2753 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
2754 {
2755 return Fail("%s: Failed to resolve the requested shape", __func__);
2756 }
2757
2758 const Shape outputOperandShape = GetOperandShape(*outputOperand);
2759 if (!SameShape(requestedShape, outputOperandShape))
2760 {
2761 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
2762 }
2763
2764 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2765 if (!input.IsValid())
2766 {
2767 return Fail("%s: Could not read input 0", __func__);
2768 }
2769
2770 armnn::ReshapeDescriptor reshapeDescriptor;
2771 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
2772 requestedShape.dimensions.data());
2773
2774 bool isSupported = false;
2775 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2776 IsReshapeSupported,
2777 data.m_Backends,
2778 isSupported,
2779 input.GetTensorInfo(),
2780 reshapeDescriptor);
2781 if (!isSupported)
2782 {
2783 return false;
2784 }
2785
2786 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2787 assert(layer != nullptr);
2788 input.Connect(layer->GetInputSlot(0));
2789
2790 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2791}
2792
2793template<typename HalPolicy,
2794 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002795 typename Model = typename HalPolicy::Model>
2796bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2797{
Mike Kelly46272802019-08-14 17:00:48 +01002798 using Operand = typename HalPolicy::Operand;
2799
Mike Kelly0a879362019-07-29 16:56:31 +01002800 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2801 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2802
2803 if (!input0.IsValid() || !input1.IsValid())
2804 {
2805 return Fail("%s: Operation has invalid inputs", __func__);
2806 }
2807
2808 // The FuseActivation parameter is always the input index 2
2809 // and it should be optional
2810 ActivationFn activationFunction;
2811 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2812 {
2813 return Fail("%s: Operation has invalid inputs", __func__);
2814 }
2815
2816 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2817 if (!output)
2818 {
2819 return Fail("%s: Could not read output 0", __func__);
2820 }
2821
2822 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2823 if (IsDynamicTensor(outputInfo))
2824 {
2825 return Fail("%s: Dynamic output tensors are not supported", __func__);
2826 }
2827
2828 bool isSupported = false;
2829 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2830 IsSubtractionSupported,
2831 data.m_Backends,
2832 isSupported,
2833 input0.GetTensorInfo(),
2834 input1.GetTensorInfo(),
2835 outputInfo);
2836 if (!isSupported)
2837 {
2838 return false;
2839 }
2840
2841 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2842 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2843
2844 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2845 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2846
2847 if (endLayer)
2848 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002849 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2850 if (!isReshapeSupported)
2851 {
2852 return false;
2853 }
Mike Kelly0a879362019-07-29 16:56:31 +01002854 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2855 }
2856
2857 return Fail("%s: ProcessActivation failed", __func__);
2858}
2859
Finn Williams23b87b32019-07-30 11:44:05 +01002860template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002861 typename Operation = typename HalPolicy::Operation,
2862 typename Model = typename HalPolicy::Model>
2863bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2864{
2865 using Operand = typename HalPolicy::Operand;
2866
2867 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2868 if (!input.IsValid())
2869 {
2870 return Fail("%s: Operation has invalid inputs", __func__);
2871 }
2872
2873 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2874 unsigned int rank = inputInfo.GetNumDimensions();
2875 if (rank > 4)
2876 {
2877 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2878 }
2879
2880 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2881 if (!output)
2882 {
2883 return Fail("%s: Could not read output 0", __func__);
2884 }
2885
2886 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
2887 {
2888 return Fail("%s: Dynamic output tensors are not supported", __func__);
2889 }
2890
2891 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
2892 // if the operand index is out of bounds.
2893 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2894
2895 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2896
2897 std::vector<int32_t> axis;
2898 if (!axisOperand)
2899 {
2900 axis.assign(dimensionSequence,
2901 dimensionSequence + rank);
2902 }
2903 else
2904 {
2905 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
2906 }
2907
2908 std::vector<uint32_t> outputDims;
2909 for (unsigned int i = 0; i < rank; i++)
2910 {
2911 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
2912 auto currentDimension = inputInfo.GetShape()[i];
2913 if (skipSqueeze || currentDimension != 1)
2914 {
2915 outputDims.push_back(currentDimension);
2916 }
2917 }
2918
2919 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
2920
2921 armnn::TensorInfo outputInfo = inputInfo;
2922 outputInfo.SetShape(outShape);
2923
2924 armnn::ReshapeDescriptor reshapeDesc;
2925 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2926
2927 bool isSupported = false;
2928 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2929 IsReshapeSupported,
2930 data.m_Backends,
2931 isSupported,
2932 inputInfo,
2933 reshapeDesc);
2934 if (!isSupported)
2935 {
2936 return false;
2937 }
2938
2939 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
2940 assert(layer != nullptr);
2941 input.Connect(layer->GetInputSlot(0));
2942
2943 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2944}
2945
2946template<typename HalPolicy,
2947 typename Operation = typename HalPolicy::Operation,
2948 typename Model = typename HalPolicy::Model>
2949bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2950{
2951 using Operand = typename HalPolicy::Operand;
2952
2953 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2954 if (!input.IsValid())
2955 {
2956 return Fail("%s: Operation has invalid inputs", __func__);
2957 }
2958
2959 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2960 unsigned int rank = inputInfo.GetNumDimensions();
2961 if (rank > 4)
2962 {
2963 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2964 }
2965
2966 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2967 if (!output)
2968 {
2969 return Fail("%s: Could not read output 0", __func__);
2970 }
2971
2972 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2973 if (IsDynamicTensor(outputInfo))
2974 {
2975 return Fail("%s: Dynamic output tensors are not supported", __func__);
2976 }
2977
2978 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2979 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2980 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
2981
2982 std::vector<int32_t> beginValues;
2983 std::vector<int32_t> endValues;
2984 std::vector<int32_t> stridesValues;
2985
2986 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
2987 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
2988 {
2989 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
2990 {
2991 return false;
2992 }
2993
2994 if (operandValues.size() != rank)
2995 {
2996 return false;
2997 }
2998
2999 return true;
3000 };
3001
3002 if (!ValidateInputOperands(*beginOperand, beginValues)
3003 || !ValidateInputOperands(*endOperand, endValues)
3004 || !ValidateInputOperands(*stridesOperand, stridesValues))
3005 {
3006 return Fail("%s: Operation has invalid input operand", __func__);
3007 }
3008
3009 // Stride cannot have value '0'
3010 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3011 {
3012 return Fail("%s: Stride must be non-zero value.", __func__);
3013 }
3014
3015 armnn::StridedSliceDescriptor descriptor;
3016 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3017 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3018 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3019 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3020
3021 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3022 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3023 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3024 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3025 {
3026 return Fail("%s: Operation has invalid inputs", __func__);
3027 }
3028
3029 bool isSupported = false;
3030 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3031 IsStridedSliceSupported,
3032 data.m_Backends,
3033 isSupported,
3034 inputInfo,
3035 outputInfo,
3036 descriptor);
3037 if (!isSupported)
3038 {
3039 return false;
3040 }
3041
3042 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3043 assert(layer != nullptr);
3044 input.Connect(layer->GetInputSlot(0));
3045
3046 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3047}
3048
3049template<typename HalPolicy,
3050 typename Operation = typename HalPolicy::Operation,
3051 typename Model = typename HalPolicy::Model>
3052bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
3053{
3054 using Operand = typename HalPolicy::Operand;
3055
3056 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3057 if (!input.IsValid())
3058 {
3059 return Fail("%s: Operation has invalid inputs", __func__);
3060 }
3061
3062 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3063 unsigned int rank = inputInfo.GetNumDimensions();
3064 if (rank > 4)
3065 {
3066 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3067 }
3068
3069 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3070 // if the operand index is out of bounds.
3071 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3072
3073 std::vector<int32_t> perm(rank);
3074 if (!permOperand)
3075 {
3076 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3077 for (unsigned int i = rank; i > 0; i--)
3078 {
3079 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3080 }
3081 }
3082 else
3083 {
3084 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3085 }
3086
3087 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3088
3089 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3090 if (!permutationVector.IsEqual(NHWCToArmNN)
3091 && !permutationVector.IsEqual(ArmNNToNHWC)
3092 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3093 {
3094 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3095 }
3096
3097 armnn::PermuteDescriptor permuteDesc;
3098 permuteDesc.m_DimMappings = permutationVector;
3099
3100 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3101 if (!output)
3102 {
3103 return Fail("%s: Could not read output 0", __func__);
3104 }
3105
3106 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3107
3108 bool isSupported = false;
3109 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3110 IsPermuteSupported,
3111 data.m_Backends,
3112 isSupported,
3113 inputInfo,
3114 outputInfo,
3115 permuteDesc);
3116 if (!isSupported)
3117 {
3118 return false;
3119 }
3120
3121 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3122 assert(layer != nullptr);
3123 input.Connect(layer->GetInputSlot(0));
3124
3125 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3126}
3127
3128template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003129 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003130 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003131 typename HalModel = typename HalPolicy::Model>
3132bool ConvertBatchToSpaceNd(const HalOperation& operation,
3133 const HalModel& model,
3134 ConversionData& data)
3135{
Finn Williams23b87b32019-07-30 11:44:05 +01003136
3137 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3138 if (!input.IsValid())
3139 {
3140 return Fail("%s: Operation has invalid inputs", __func__);
3141 }
3142
3143 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3144 if (!output)
3145 {
3146 return Fail("%s: Could not read output 0", __func__);
3147 }
3148
3149 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3150 if (IsDynamicTensor(outputInfo))
3151 {
3152 return Fail("%s: Dynamic output tensors are not supported", __func__);
3153 }
3154
3155 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3156 if (!blockOperand)
3157 {
3158 return Fail("%s: Could not read input 1", __func__);
3159 }
3160
3161 // Convert the block operand to int32
3162 std::vector<int32_t> block;
3163 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3164 {
3165 return Fail("%s: Input 1 has invalid values", __func__);
3166 }
3167
3168 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3169
3170 unsigned int rank = inputInfo.GetNumDimensions();
3171 if (rank != 4)
3172 {
3173 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3174 }
3175
3176 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3177 {
3178 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3179 " greater than or equal to 1", __func__);
3180 }
3181
3182 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3183 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3184 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3185
3186 if (Is12Operand(*output))
3187 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003188 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003189 }
3190 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3191 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3192
3193 bool isSupported = false;
3194 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3195 IsBatchToSpaceNdSupported,
3196 data.m_Backends,
3197 isSupported,
3198 inputInfo,
3199 outputInfo,
3200 batchToSpaceNdDesc);
3201 if (!isSupported)
3202 {
3203 return false;
3204 }
3205
3206 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3207 assert(layer != nullptr);
3208 input.Connect(layer->GetInputSlot(0));
3209
3210 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3211}
Mike Kelly0a879362019-07-29 16:56:31 +01003212
Finn Williamsd74c5052019-07-30 17:06:00 +01003213template<typename HalPolicy,
3214 typename HalOperation = typename HalPolicy::Operation,
3215 typename HalOperand = typename HalPolicy::Operand,
3216 typename HalModel = typename HalPolicy::Model>
3217bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3218{
3219 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3220 if (!input.IsValid())
3221 {
3222 return Fail("%s: Operation has invalid inputs", __func__);
3223 }
3224
3225 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3226 unsigned int rank = inputInfo.GetNumDimensions();
3227 unsigned int spatialDim = rank - 2;
3228
3229 if (rank != 4)
3230 {
3231 Fail("%s: Only inputs with rank 4 are supported", __func__);
3232 }
3233
3234 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3235 if (!output)
3236 {
3237 return Fail("%s: Could not read output 0", __func__);
3238 }
3239
3240 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3241 if (IsDynamicTensor(outputInfo))
3242 {
3243 return Fail("%s: Dynamic output tensors are not supported", __func__);
3244 }
3245
3246 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3247 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3248
3249 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3250 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3251 {
3252 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3253 }
3254
3255 std::vector<int32_t> blockShape;
3256 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3257 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3258 {
3259 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3260 }
3261
3262 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3263 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3264 {
3265 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3266 }
3267
3268 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3269 std::vector<int32_t> paddings;
3270 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3271 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3272 {
3273 int paddingBeforeInput = paddings[i];
3274 int paddingAfterInput = paddings[i + 1];
3275 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3276 {
3277 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3278 }
3279
3280 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3281 }
3282
3283 armnn::SpaceToBatchNdDescriptor descriptor;
3284 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3285 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3286 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3287
3288 if (Is12Operand(*output))
3289 {
3290 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3291 }
3292
3293 bool isSupported = false;
3294 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3295 IsSpaceToBatchNdSupported,
3296 data.m_Backends,
3297 isSupported,
3298 inputInfo,
3299 outputInfo,
3300 descriptor);
3301 if (!isSupported)
3302 {
3303 return false;
3304 }
3305
3306 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3307 assert(layer != nullptr);
3308 input.Connect(layer->GetInputSlot(0));
3309
3310 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3311}
3312
Kevin May407718f2019-09-09 14:46:41 +01003313template<typename HalPolicy,
3314 typename HalOperation = typename HalPolicy::Operation,
3315 typename HalModel = typename HalPolicy::Model>
3316bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3317{
3318 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3319
3320 if (!input.IsValid())
3321 {
3322 return Fail("%s: Operation has invalid input", __func__);
3323 }
3324
3325 using HalOperand = typename HalPolicy::Operand;
3326 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3327 if (!output)
3328 {
3329 return Fail("%s: Could not read output 0", __func__);
3330 }
3331
3332 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3333 if (IsDynamicTensor(outputInfo))
3334 {
3335 return Fail("%s: Dynamic output tensors are not supported", __func__);
3336 }
3337
3338 bool isSupported = false;
3339 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3340 IsAbsSupported,
3341 data.m_Backends,
3342 isSupported,
3343 input.GetTensorInfo(),
3344 outputInfo);
3345
3346 if (!isSupported)
3347 {
3348 return false;
3349 }
3350
3351 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3352 assert(layer != nullptr);
3353 input.Connect(layer->GetInputSlot(0));
3354
3355 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3356}
3357
3358
saoste01b8471482018-10-10 09:44:51 +01003359} // namespace armnn_driver