blob: 2e4cadd7968349f3e8f20688469b816a95c6a137 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Bundles the state shared while converting one Android NN model to an ArmNN network:
// the candidate backends, the network under construction, the output slot created for
// each operand, and the memory pools backing constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends queried (in order) when checking layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network being built; starts out as a null INetworkPtr.
    armnn::INetworkPtr m_Network;
    // Output slot producing each converted operand's data (presumably indexed by operand index — confirm with callers).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Run-time memory pools referenced by CONSTANT_REFERENCE operands.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps an armnn::IOutputSlot (the producer feeding a layer input) together with its
// TensorInfo and a validity flag, so conversion code can pass possibly-missing inputs around.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Wires this handle's producer into the given input slot (implementation not in this header).
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds (or references) the data of a constant tensor, ready to be handed to ArmNN.
// An invalid pin signals a conversion error; an invalid-but-optional pin signals a
// permitted missing optional operand.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation applied to the tensor (see m_SwizzledTensorData below).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: a pin may own swizzled tensor storage.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// The format string and arguments are forwarded directly to ALOGD (printf-style).
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100125// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
126// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
127#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
128 std::string reasonIfUnsupported; \
129 try { \
130 for (auto&& backendId : backends) \
131 { \
132 auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
133 if (layerSupportObject) \
134 { \
135 supported = \
136 layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
137 if (supported) \
138 { \
139 break; \
140 } \
141 else \
142 { \
143 if (reasonIfUnsupported.size() > 0) \
144 { \
145 ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
146 } \
147 else \
148 { \
149 ALOGD("%s: not supported by armnn", funcName); \
150 } \
151 } \
152 } \
153 else \
154 { \
155 ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
156 } \
157 } \
158 if (!supported) \
159 { \
160 ALOGD("%s: not supported by any specified backend", funcName); \
161 } \
162 } catch (const armnn::InvalidArgumentException &e) { \
163 throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
arovir01b0717b52018-09-05 17:03:25 +0100164 }
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100165
Mike Kellyb5fdf382019-06-11 16:35:25 +0100166template<typename Operand>
167armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100168{
169 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
170}
171
Matthew Bentham912b3622019-05-03 15:49:14 +0100172inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100173{
Matthew Bentham912b3622019-05-03 15:49:14 +0100174 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
175 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
176 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100177}
178
Mike Kellyb5fdf382019-06-11 16:35:25 +0100179#ifdef ARMNN_ANDROID_NN_V1_2
180
181inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
182{
183 return type == V1_2::OperandType::BOOL ||
184 type == V1_2::OperandType::TENSOR_FLOAT16 ||
185 type == V1_2::OperandType::TENSOR_FLOAT32 ||
186 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
187 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
188 type == V1_2::OperandType::TENSOR_INT32;
189}
190
191#endif
192
// The V1_0 HAL has no BOOL operand type, so this overload always reports false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
197
Sadik Armagan61113162019-07-25 09:09:40 +0100198inline bool Is12Operand(V1_0::Operand)
199{
200 return false;
201}
202
Mike Kellyb5fdf382019-06-11 16:35:25 +0100203#ifdef ARMNN_ANDROID_NN_V1_2
204
// Returns true when the V1_2 operand is of BOOL type.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}
209
Sadik Armagan61113162019-07-25 09:09:40 +0100210/// Checks if a operand is 1_2 Operand
211inline bool Is12Operand(V1_2::Operand)
212{
213 return true;
214}
215
Mike Kellyb5fdf382019-06-11 16:35:25 +0100216#endif
217
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100218template<typename LayerHandleType>
219armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
220 armnn::TensorInfo reshapeInfo)
221{
222 armnn::ReshapeDescriptor reshapeDescriptor;
223 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
224
225 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
226 BOOST_ASSERT(reshapeLayer != nullptr);
227
228 // Attach the input layer to the reshape layer
229 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
230 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
231
232 return *reshapeLayer;
233}
234
Sadik Armagan64b19b52019-08-19 09:49:58 +0100235bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
236 armnn::IConnectableLayer* startLayer, ConversionData& data)
arovir01b0717b52018-09-05 17:03:25 +0100237{
238 BOOST_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100239
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100240 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
241 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
242
243 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
244 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
245
246 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100247 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100248 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
249 input0.Connect(startLayer->GetInputSlot(0));
250 input1.Connect(startLayer->GetInputSlot(1));
251
Sadik Armagan64b19b52019-08-19 09:49:58 +0100252 return true;
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100253 }
254
255 // Since the number of dimensions do not match then we need to add degenerate dimensions
256 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
257
258 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
259 unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
260 boost::numeric_cast<int>(inputDimensions1));
261
262 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
263 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
264 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
265
266 const armnn::TensorShape& smallShape = smallInfo.GetShape();
267 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
268 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
269 {
270 reshapedDimensions[i] = smallShape[i - sizeDifference];
271 }
272
273 armnn::TensorInfo reshapedInfo = smallInfo;
274 reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
275 reshapedDimensions.data() });
Sadik Armagan64b19b52019-08-19 09:49:58 +0100276
277 // RehsapeDescriptor that is ignored in the IsReshapeSupported function
278 armnn::ReshapeDescriptor reshapeDescriptor;
279
280 bool isSupported = false;
281 FORWARD_LAYER_SUPPORT_FUNC(__func__,
282 IsReshapeSupported,
283 data.m_Backends,
284 isSupported,
285 reshapedInfo,
286 reshapeDescriptor);
287 if (!isSupported)
288 {
289 return false;
290 }
291
292 BOOST_ASSERT(data.m_Network != nullptr);
293 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100294
295 if (input0IsSmaller)
296 {
297 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
298 //
299 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100300 // | |
301 // Reshape |
302 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100303 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100304
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100305 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
306 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100307 }
308 else
309 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100310 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
311 //
312 // Input0 Input1
313 // | |
314 // | Reshape
315 // \ /
316 // StartLayer
317
arovir01b0717b52018-09-05 17:03:25 +0100318 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100319 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100320 }
Sadik Armagan64b19b52019-08-19 09:49:58 +0100321
322 return true;
arovir01b0717b52018-09-05 17:03:25 +0100323}
324
325void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
326 android::nn::PaddingScheme scheme)
327{
328 int32_t padHead;
329 int32_t padTail;
330 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
331 outPadHead = boost::numeric_cast<uint32_t>(padHead);
332 outPadTail = boost::numeric_cast<uint32_t>(padTail);
333}
334
Mike Kelly86b36d42019-07-12 16:39:33 +0100335#ifdef ARMNN_ANDROID_NN_V1_2
336
337void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
338 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
339{
340 int32_t padHead;
341 int32_t padTail;
342 calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
343 outPadHead = boost::numeric_cast<uint32_t>(padHead);
344 outPadTail = boost::numeric_cast<uint32_t>(padTail);
345}
346
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +0100347void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
348 int32_t& outPadTail, android::nn::PaddingScheme scheme)
349{
350 calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
351}
352
Mike Kelly86b36d42019-07-12 16:39:33 +0100353#endif
354
Matthew Bentham912b3622019-05-03 15:49:14 +0100355Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100356{
357 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100358 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100359 shape.dimensions = operand.dimensions;
360 shape.scale = operand.scale;
361 shape.offset = operand.zeroPoint;
362 return shape;
363}
364
Mike Kelly46272802019-08-14 17:00:48 +0100365#ifdef ARMNN_ANDROID_NN_V1_2
366
367Shape GetOperandShape(const V1_2::Operand& operand)
368{
369 Shape shape;
370 shape.type = OperandType(operand.type);
371 shape.dimensions = operand.dimensions;
372 shape.scale = operand.scale;
373 shape.offset = operand.zeroPoint;
374 return shape;
375}
376
377#endif
378
arovir01b0717b52018-09-05 17:03:25 +0100379// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
380// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
Aron Virginas-Tara0baa172019-08-01 11:24:08 +0100381// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
382// user (us, in this case) to ensure they match.
arovir01b0717b52018-09-05 17:03:25 +0100383void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
384 const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
385{
386 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
387 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
388 {
389 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
390 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
391 {
392 ALOGW("Bias quantization scale has been modified to match input*weights");
393 biasInfo.SetQuantizationScale(expectedBiasScale);
394 }
395 }
396}
397
// 4D Tensor Permutations (destination index for each source dimension)
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used for concat along dimension 2).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (RotateTensorLeft/Right are inverses of each other)
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
408
409template<typename OSlot>
410armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
411 const armnn::PermutationVector& mappings)
412{
413 // Add swizzle layer
414 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
415
416 BOOST_ASSERT(layer != nullptr);
417
418 // Connect input to swizzle layer
419 input.Connect(layer->GetInputSlot(0));
420
421 // Setup swizzled output
422 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
423 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
424
425 return *layer;
426}
427
428void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
429{
430 // Add swizzle layer
431 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
432 // Connect swizzled input to layer
433 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
434}
435
436armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
437{
438 // Add deswizzle layer
439 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
440 return deswizzleLayer;
441}
442
// Swizzles the input of 'firstLayer' (NHWC -> ArmNN) and deswizzles the output of 'lastLayer'.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
452
// Single-layer convenience overload: swizzle the input and deswizzle the output of the same layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
459
460bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
461 const armnn::TensorShape & outputShape,
462 uint32_t concatDim)
463{
464 // Validate the output shape is correct given the input shapes (which have just been validated)
465 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
466 if (outputShape.GetNumDimensions() != numDimensions)
467 {
468 return Fail("%s: Output shape has wrong number of dimensions", __func__);
469 }
470
471 unsigned int outputSizeAlongConcatenatedDimension = 0;
472 for (unsigned int i = 0; i < inputShapes.size(); i++)
473 {
474 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
475 }
476
477 for (unsigned int i = 0; i < numDimensions; ++i)
478 {
479 if (i == concatDim)
480 {
481 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
482 {
483 return Fail(
484 "%s: Invalid output shape for dimension %d (%d != %d)",
485 __func__,
486 i,
487 outputShape[i],
488 outputSizeAlongConcatenatedDimension);
489 }
490 }
491 else
492 {
493 if (outputShape[i] != inputShapes[0][i])
494 {
495 return Fail("%s: Invalid output shape", __func__);
496 }
497 }
498 }
499
500 return true;
501}
502
// True when the tensor has fewer than 3 dimensions and therefore needs reshaping
// before the rank-3/4 handling below (see CreateConcatPermutationParameters).
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
507
arovir01b0717b52018-09-05 17:03:25 +0100508void SwizzleInputs(armnn::INetwork& network,
509 std::vector<LayerInputHandle>& inputs,
510 std::vector<armnn::TensorShape>& inputShapes,
511 const armnn::PermutationVector& mapping)
512{
513 if (!mapping.IsEqual(IdentityPermutation4D))
514 {
515 size_t nInputs = inputs.size();
516 for (size_t i=0; i<nInputs; ++i)
517 {
518 // add swizzle layer
519 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
520 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
521 auto& outputInfo = outputSlot.GetTensorInfo();
522 // replace inputs with the swizzled ones
523 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
524 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
525 }
526 }
527}
528
narpra01f176d5a2018-11-18 20:17:48 +0000529bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
530 int32_t & concatDimension,
531 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100532{
narpra01f176d5a2018-11-18 20:17:48 +0000533 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100534 BOOST_ASSERT(numberOfDimensions >= 3);
535
536 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000537 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
538 // or along dimension 0 or 2 for a 3-D tensor.
539 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100540 {
narpra01f176d5a2018-11-18 20:17:48 +0000541 concatDimension = 1;
542 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
543 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100544 }
narpra01f176d5a2018-11-18 20:17:48 +0000545 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100546 {
narpra01f176d5a2018-11-18 20:17:48 +0000547 concatDimension = 0;
548 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
549 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100550 }
narpra01f176d5a2018-11-18 20:17:48 +0000551 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100552}
553
554} // anonymous namespace
555
556namespace armnn_driver
557{
558
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
569
570} // namespace armnn_driver
571
572///
573/// Utility templates
574///
575
576namespace armnn_driver
577{
578
579using namespace android::nn;
580
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100581template<typename HalPolicy,
582 typename HalOperand = typename HalPolicy::Operand,
583 typename HalOperation = typename HalPolicy::Operation,
584 typename HalModel = typename HalPolicy::Model>
585const HalOperand* GetInputOperand(const HalOperation& operation,
586 uint32_t inputIndex,
587 const HalModel& model,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100588 bool failOnIndexOutOfBounds = true)
arovir01b0717b52018-09-05 17:03:25 +0100589{
590 if (inputIndex >= operation.inputs.size())
591 {
saoste01b8471482018-10-10 09:44:51 +0100592 if (failOnIndexOutOfBounds)
593 {
594 Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
595 }
arovir01b0717b52018-09-05 17:03:25 +0100596 return nullptr;
597 }
598
599 BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
600 return &model.operands[operation.inputs[inputIndex]];
601}
602
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100603template<typename HalPolicy,
604 typename HalOperand = typename HalPolicy::Operand,
605 typename HalOperation = typename HalPolicy::Operation,
606 typename HalModel = typename HalPolicy::Model>
607const HalOperand* GetOutputOperand(const HalOperation& operation,
608 uint32_t outputIndex,
609 const HalModel& model)
arovir01b0717b52018-09-05 17:03:25 +0100610{
611 if (outputIndex >= operation.outputs.size())
612 {
613 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
614 return nullptr;
615 }
616
617 // Model should have been validated beforehand
618 BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());
619
620 return &model.operands[operation.outputs[outputIndex]];
621}
622
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100623template<typename HalPolicy,
624 typename HalOperand = typename HalPolicy::Operand,
625 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100626const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100627 const HalModel& model,
628 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000629 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100630{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100631 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100632
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100633 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100634 switch (operand.lifetime)
635 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100636 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100637 {
638 // Constant found in model.operandValues
639 valueStart = &model.operandValues[operand.location.offset];
640 break;
641 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100642 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100643 {
644 // Constant specified via a Memory object
645 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
646 break;
647 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100648 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000649 {
650 // An optional input tensor with no values is not an error so should not register as a fail
651 if (optional)
652 {
653 valueStart = nullptr;
654 break;
655 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100656 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000657 }
arovir01b0717b52018-09-05 17:03:25 +0100658 default:
659 {
660 // Unsupported/invalid (e.g. can't get value of an input to the model)
661 Fail("%s: unsupported/invalid operand lifetime: %s",
662 __func__, toString(operand.lifetime).c_str());
663 valueStart = nullptr;
664 }
665 }
666
667 return valueStart;
668}
669
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100670template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100671 typename HalOperation = typename HalPolicy::Operation,
672 typename HalModel = typename HalPolicy::Model,
673 typename HalOperandType = typename HalPolicy::OperandType>
674bool GetOperandType(const HalOperation& operation,
675 uint32_t inputIndex,
676 const HalModel& model,
677 HalOperandType& type)
678{
679 using HalOperand = typename HalPolicy::Operand;
680
681 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
682 if (!operand)
683 {
684 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
685 }
686
687 type = operand->type;
688 return true;
689}
690
// Converts a constant HAL operand into a ConstTensorPin.
// @param dimensionMappings   permutation applied to the tensor (default: no permutation)
// @param overrideTensorShape if non-null, replaces the shape read from the operand
// @param optional            when true, a missing value yields a valid "optional" pin
//                            instead of an error
// Returns an invalid pin (optionally flagged optional) on failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Unless marked optional, the operand must hold constant (or explicitly absent) data.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
738
739template<typename HalPolicy,
740 typename HalOperation = typename HalPolicy::Operation,
741 typename HalModel = typename HalPolicy::Model>
742ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
743 uint32_t inputIndex,
744 const HalModel& model,
745 const ConversionData& data,
746 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
747 const armnn::TensorShape* overrideTensorShape = nullptr,
748 bool optional = false)
749{
750 using HalOperand = typename HalPolicy::Operand;
751
752 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
753 if (!operand)
754 {
755 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
756 return ConstTensorPin();
757 }
758 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
759 model,
760 data,
761 dimensionMappings,
762 overrideTensorShape,
763 optional);
764}
765
766template<typename HalPolicy,
767 typename OutputType,
768 typename HalOperandType = typename HalPolicy::OperandType,
769 typename HalOperation = typename HalPolicy::Operation,
770 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100771bool GetInputScalar(const HalOperation& operation,
772 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100773 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100774 OutputType& outValue,
775 const HalModel& model,
776 const ConversionData& data)
777{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100778 using HalOperand = typename HalPolicy::Operand;
779
780 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100781 if (!operand)
782 {
783 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
784 }
785
786 if (operand->type != type)
787 {
788 return Fail("%s: unexpected operand type: %s (should be %s)",
789 __func__, toString(operand->type).c_str(), toString(type).c_str());
790 }
791
792 if (operand->location.length != sizeof(OutputType))
793 {
794 return Fail("%s: incorrect operand location length: %i (should be %i)",
795 __func__, operand->location.length, sizeof(OutputType));
796 }
797
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100798 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100799 if (!valueAddress)
800 {
801 return Fail("%s: failed to get address for operand", __func__);
802 }
803
804 outValue = *(static_cast<const OutputType*>(valueAddress));
805 return true;
806}
807
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100808template<typename HalPolicy,
809 typename HalOperation = typename HalPolicy::Operation,
810 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100811bool GetInputInt32(const HalOperation& operation,
812 uint32_t inputIndex,
813 int32_t& outValue,
814 const HalModel& model,
815 const ConversionData& data)
816{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100817 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100818}
819
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100820template<typename HalPolicy,
821 typename HalOperation = typename HalPolicy::Operation,
822 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100823bool GetInputFloat32(const HalOperation& operation,
824 uint32_t inputIndex,
825 float& outValue,
826 const HalModel& model,
827 const ConversionData& data)
828{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100829 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100830}
831
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100832template<typename HalPolicy,
833 typename HalOperation = typename HalPolicy::Operation,
834 typename HalOperandType = typename HalPolicy::OperandType,
835 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100836bool GetInputActivationFunctionImpl(const HalOperation& operation,
837 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100838 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100839 ActivationFn& outActivationFunction,
840 const HalModel& model,
841 const ConversionData& data)
842{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100843 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100844 {
845 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
846 __func__,
847 toString(type).c_str(),
848 toString(OperandType::INT32).c_str(),
849 toString(OperandType::TENSOR_INT32).c_str());
850 }
851
852 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100853 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100854 {
855 return Fail("%s: failed to get activation input value", __func__);
856 }
857 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
858 return true;
859}
860
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100861template<typename HalPolicy,
862 typename HalOperation = typename HalPolicy::Operation,
863 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100864bool GetInputActivationFunction(const HalOperation& operation,
865 uint32_t inputIndex,
866 ActivationFn& outActivationFunction,
867 const HalModel& model,
868 const ConversionData& data)
869{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100870 return GetInputActivationFunctionImpl<HalPolicy>(operation,
871 inputIndex,
872 HalPolicy::OperandType::INT32,
873 outActivationFunction,
874 model,
875 data);
arovir01b0717b52018-09-05 17:03:25 +0100876}
877
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Reads a fused-activation operand that the HAL spec describes as a tensor.
/// NOTE(review): despite the "1-D tensor" comment below, this forwards
/// OperandType::INT32 (the scalar type), not TENSOR_INT32, so GetInputScalar's
/// type check would reject a genuine tensor operand — confirm whether
/// TENSOR_INT32 was intended here.
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
895
896
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100897template<typename HalPolicy,
898 typename HalOperation = typename HalPolicy::Operation,
899 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100900bool GetOptionalInputActivation(const HalOperation& operation,
901 uint32_t inputIndex,
902 ActivationFn& activationFunction,
903 const HalModel& model,
904 const ConversionData& data)
905{
906 if (operation.inputs.size() <= inputIndex)
907 {
908 activationFunction = ActivationFn::kActivationNone;
909 }
910 else
911 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100912 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100913 {
914 return Fail("%s: Operation has invalid inputs", __func__);
915 }
916 }
917 return true;
918}
919
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100920template<typename HalPolicy,
921 typename ConvolutionDescriptor,
922 typename HalOperation = typename HalPolicy::Operation,
923 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100924bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
925 uint32_t dilationXIndex,
926 ConvolutionDescriptor& descriptor,
927 const HalModel& model,
928 const ConversionData& data)
929{
930 bool success = true;
931 if (operation.inputs.size() >= dilationXIndex + 2)
932 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933 success &= GetInputScalar<HalPolicy>(operation,
934 dilationXIndex,
935 HalPolicy::OperandType::INT32,
936 descriptor.m_DilationX,
937 model,
938 data);
939 success &= GetInputScalar<HalPolicy>(operation,
940 dilationXIndex + 1,
941 HalPolicy::OperandType::INT32,
942 descriptor.m_DilationY,
943 model,
944 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100945 }
946
947 return success;
948}
949
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100950template<typename HalPolicy,
951 typename HalOperand = typename HalPolicy::Operand,
952 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100953bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100954 std::vector<int32_t>& outValues,
955 const HalModel& model,
956 const ConversionData& data)
957{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100959 {
960 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
961 }
962
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100963 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100964 if (!startAddress)
965 {
966 return Fail("%s: failed to get operand address", __func__, operand.type);
967 }
968
969 // Check number of bytes is sensible
970 const uint32_t numBytes = operand.location.length;
971 if (numBytes % sizeof(int32_t) != 0)
972 {
973 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
974 __func__, numBytes, sizeof(int32_t));
975 }
976
977 outValues.resize(numBytes / sizeof(int32_t));
978 memcpy(outValues.data(), startAddress, numBytes);
979 return true;
980}
981
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100982template<typename HalPolicy,
983 typename HalOperation = typename HalPolicy::Operation,
984 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100985bool GetInputPaddingScheme(const HalOperation& operation,
986 uint32_t inputIndex,
987 PaddingScheme& outPaddingScheme,
988 const HalModel& model,
989 const ConversionData& data)
990{
991 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100992 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100993 {
994 return Fail("%s: failed to get padding scheme input value", __func__);
995 }
996
997 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
998 return true;
999}
1000
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Resolves input @p inputIndex of @p operation to a LayerInputHandle, i.e. an
/// ArmNN output slot that produces that operand's tensor:
///  - model inputs / temporaries / model outputs map to an already-tracked slot;
///  - constants become a new ArmNN Constant layer (if the backends support it).
/// Returns an invalid (default-constructed) handle on any failure.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled by the catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the Constant layer if at least one backend accepts it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1113
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001114template<typename HalPolicy,
1115 typename HalOperation = typename HalPolicy::Operation,
1116 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001117bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1118 uint32_t operationOutputIndex,
1119 armnn::IConnectableLayer& layer,
1120 uint32_t layerOutputIndex,
1121 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001122 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001123{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001124 using HalOperand = typename HalPolicy::Operand;
1125
1126 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001127 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1128 {
1129 return false;
1130 }
1131
1132 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1133
1134 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1135 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1136
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001137 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001138
1139 return true;
1140}
1141
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001142template<typename HalPolicy,
1143 typename HalOperation = typename HalPolicy::Operation,
1144 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001145armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1146 uint32_t inputIndex,
1147 const HalModel& model,
1148 ConversionData& data)
1149{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001150 using HalOperand = typename HalPolicy::Operand;
1151
1152 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001153 if (!operand)
1154 {
1155 return armnn::DataLayout::NHWC;
1156 }
1157
1158 if (!IsBool(*operand))
1159 {
1160 return armnn::DataLayout::NHWC;
1161 }
1162
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001163 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001164 if (!valueAddress)
1165 {
1166 return armnn::DataLayout::NHWC;
1167 }
1168
1169 if (*(static_cast<const bool*>(valueAddress)))
1170 {
1171 return armnn::DataLayout::NCHW;
1172 }
1173 else
1174 {
1175 return armnn::DataLayout::NHWC;
1176 }
1177}
1178
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001179template<typename HalPolicy,
1180 typename HalOperation = typename HalPolicy::Operation,
1181 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001182bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1183 uint32_t outputIndex,
1184 armnn::IConnectableLayer& layer,
1185 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001186 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001187{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001188 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1189 outputIndex,
1190 layer,
1191 outputIndex,
1192 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001193 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001194}
1195
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196template<typename HalPolicy,
1197 typename HalOperation = typename HalPolicy::Operation,
1198 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001199bool ConvertToActivation(const HalOperation& operation,
1200 const char* operationName,
1201 const armnn::ActivationDescriptor& activationDesc,
1202 const HalModel& model,
1203 ConversionData& data)
1204{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001205 using HalOperand = typename HalPolicy::Operand;
1206
1207 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001208 if (!input.IsValid())
1209 {
1210 return Fail("%s: Input 0 is invalid", operationName);
1211 }
1212
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001213 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001214 if (!outputOperand)
1215 {
1216 return false;
1217 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001218
1219 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001220 if (IsDynamicTensor(outInfo))
1221 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001222 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001223 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001224
1225 bool isSupported = false;
1226 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1227 IsActivationSupported,
1228 data.m_Backends,
1229 isSupported,
1230 input.GetTensorInfo(),
1231 outInfo,
1232 activationDesc);
1233 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001234 {
1235 return false;
1236 }
1237
1238 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1239 BOOST_ASSERT(layer != nullptr);
1240 input.Connect(layer->GetInputSlot(0));
1241
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001242 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001243}
1244
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001245template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001246 typename HalOperation = typename HalPolicy::Operation,
1247 typename HalModel = typename HalPolicy::Model>
1248bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1249{
1250 armnn::ActivationDescriptor desc;
1251 desc.m_Function = armnn::ActivationFunction::ReLu;
1252
1253 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1254}
1255
1256template<typename HalPolicy,
1257 typename HalOperation = typename HalPolicy::Operation,
1258 typename HalModel = typename HalPolicy::Model>
1259bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1260{
1261 armnn::ActivationDescriptor desc;
1262 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1263 desc.m_A = 1.0f;
1264 desc.m_B = -1.0f;
1265
1266 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1267}
1268
1269template<typename HalPolicy,
1270 typename HalOperation = typename HalPolicy::Operation,
1271 typename HalModel = typename HalPolicy::Model>
1272bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1273{
1274 armnn::ActivationDescriptor desc;
1275 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1276 desc.m_A = 6.0f;
1277
1278 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1279}
1280
1281template<typename HalPolicy,
1282 typename HalOperation = typename HalPolicy::Operation,
1283 typename HalModel = typename HalPolicy::Model>
1284bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1285{
1286 armnn::ActivationDescriptor desc;
1287 desc.m_Function = armnn::ActivationFunction::TanH;
1288 desc.m_A = 1.0f; // android nn does not support tanH parameters
1289 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1290
1291 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1292}
1293
1294template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001295 typename HalOperation = typename HalPolicy::Operation,
1296 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001297bool ConvertPaddings(const HalOperation& operation,
1298 const HalModel& model,
1299 ConversionData& data,
1300 unsigned int rank,
1301 armnn::PadDescriptor& padDescriptor)
1302{
1303 using HalOperand = typename HalPolicy::Operand;
1304
1305 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1306 if (!paddingsOperand)
1307 {
1308 return Fail("%s: Could not read paddings operand", __func__);
1309 }
1310
1311 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1312 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1313 {
1314 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1315 }
1316
1317 std::vector<int32_t> paddings;
1318 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1319
1320 // add padding for each dimension of input tensor.
1321 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1322 {
1323 int paddingBeforeInput = paddings[i];
1324 int paddingAfterInput = paddings[i + 1];
1325
1326 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1327 {
1328 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1329 }
1330
1331 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1332 }
1333
1334 return true;
1335}
1336
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Shared implementation for the 2D pooling operations (AVERAGE_POOL_2D,
/// L2_POOL_2D, MAX_POOL_2D). Supports both NNAPI parameter layouts:
///  - explicit padding: 10+ inputs (pad l/r/t/b, strides, pool size, activation,
///    optional layout bool at index 10);
///  - implicit padding: 7+ inputs (padding scheme, strides, pool size, activation,
///    optional layout bool at index 7).
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 adds an optional data-layout operand at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NOTE(review): shape indices [1]/[2] assume NHWC (N,H,W,C) here, yet the
        // optional layout operand below may still select NCHW afterwards — confirm
        // implicit padding is computed correctly for NCHW models.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    // Query backend support before adding the layer to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1454
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
/// Converts an ADD operation: two tensor inputs plus an optional fused-activation
/// operand at index 2. Adds an Addition layer (with broadcast reshapes if needed)
/// followed by any fused activation.
bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Query backend support before adding any layers to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // BroadcastTensor connects input0/input1 to startLayer, inserting reshape
        // layers when the two input ranks differ.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1524
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
/// Converts an Android NN CONCATENATION operation into an armnn Concat layer.
/// Inputs 0..N-1 are the tensors to join; input N is the concatenation axis (INT32 scalar).
/// Low-rank (1D/2D) inputs are padded up to 3D with reshape layers, and a permute pair is
/// inserted when the backend only supports concatenation along certain axes; both adjustments
/// are undone on the output so callers see the original shape.
/// Returns true and registers the output slot on success, false (via Fail) otherwise.
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalisation the axis must index a real dimension of the output tensor.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Tracks whether any 1D/2D input was expanded to 3D, and by how many leading
    // dimensions, so the same expansion can be mirrored on the output shape below.
    // NOTE(review): this assumes all reshaped inputs gain the SAME number of
    // dimensions (i.e. all inputs have equal rank) — confirm against the NN API spec.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
            *data.m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1759
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN CONV_2D operation into an armnn Convolution2d layer (NHWC).
/// Supports both the explicit-padding form (10 inputs: pads, strides, activation) and the
/// implicit-padding form (7 inputs: padding scheme, strides, activation). Weights and bias
/// must be constant operands. Returns true on success; false (via Fail) otherwise.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 10 inputs: explicit padding — pad left/right/top/bottom, strides and fused activation.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 7 inputs: implicit padding — a padding scheme plus strides and fused activation;
    // pads are derived from kernel/input extents via CalcPadding.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights indexed as [ O, H, W, I ]; input as [ N, H, W, C ] (NHWC above).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1879
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001880template<typename HalPolicy,
1881 typename HalOperation = typename HalPolicy::Operation,
1882 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01001883bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
1884{
1885 using HalOperand = typename HalPolicy::Operand;
1886 using HalOperandType = typename HalPolicy::OperandType;
1887
1888 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1889 if (!input.IsValid() )
1890 {
1891 return Fail("%s: Operation has invalid inputs", __func__);
1892 }
1893
1894 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1895 unsigned int rank = inputInfo.GetNumDimensions();
1896 if (rank != 4)
1897 {
1898 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1899 }
1900
1901 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1902 if (!output)
1903 {
1904 return Fail("%s: Could not read output 0", __func__);
1905 }
1906
1907 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1908 if (IsDynamicTensor(outputInfo))
1909 {
1910 return Fail("%s: Dynamic output tensors are not supported", __func__);
1911 }
1912
1913 armnn::DepthToSpaceDescriptor descriptor;
1914
1915 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
1916 if (descriptor.m_BlockSize <= 1)
1917 {
1918 return Fail("%s: Block size must be at least 1 in all dimensions");
1919 }
1920
1921 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1922 if (Is12Operand(*output))
1923 {
1924 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
1925 }
1926
1927 bool isSupported = false;
1928 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1929 IsDepthToSpaceSupported,
1930 data.m_Backends,
1931 isSupported,
1932 inputInfo,
1933 outputInfo,
1934 descriptor);
1935 if (!isSupported)
1936 {
1937 return false;
1938 }
1939
1940 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
1941 assert(layer != nullptr);
1942 input.Connect(layer->GetInputSlot(0));
1943
1944 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1945}
1946
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts an Android NN DEPTHWISE_CONV_2D operation into an armnn DepthwiseConvolution2d
/// layer (NHWC). Handles the explicit-padding form (11 inputs) and the implicit-padding
/// form (8 inputs). The AndroidNN [1, H, W, I*M] weights are re-viewed as [H, W, I, M] and
/// then swizzled to armnn's [M, I, H, W] layout. Weights and bias must be constant.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I is taken from the input's channel dimension; M = (I*M) / I is the depth multiplier.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // 11 inputs: explicit padding form. Input 9 (depth multiplier) is not read here;
    // it is implied by the weights reinterpretation above.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8 inputs: implicit padding form; pads are derived via CalcPadding.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are in [ M, I, H, W ] order after the swizzle above.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2091
Mike Kelly3c673942019-07-25 09:26:06 +01002092template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002093 typename Operation = typename HalPolicy::Operation,
2094 typename Model = typename HalPolicy::Model>
2095bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002096{
Mike Kelly46272802019-08-14 17:00:48 +01002097 using Operand = typename HalPolicy::Operand;
2098
2099 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2100 if (!input.IsValid())
2101 {
2102 return Fail("%s: Operation has invalid input", __func__);
2103 }
2104
2105 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2106 if (!outputOperand)
2107 {
2108 return Fail("%s: Operation has invalid outputs", __func__);
2109 }
2110
2111 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2112 if (IsDynamicTensor(outputInfo))
2113 {
2114 return Fail("%s: Dynamic output tensors are not supported", __func__);
2115 }
2116
2117 bool isSupported = false;
2118 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2119 IsDequantizeSupported,
2120 data.m_Backends,
2121 isSupported,
2122 input.GetTensorInfo(),
2123 GetTensorInfoForOperand(*outputOperand));
2124 if (!isSupported)
2125 {
2126 return false;
2127 }
2128
2129 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2130 assert(layer != nullptr);
2131 input.Connect(layer->GetInputSlot(0));
2132
2133 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2134}
2135
2136template<typename HalPolicy,
2137 typename Operation = typename HalPolicy::Operation,
2138 typename Model = typename HalPolicy::Model>
2139bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2140{
2141 using Operand = typename HalPolicy::Operand;
2142
2143 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2144 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2145
2146 if (!input0.IsValid() || !input1.IsValid())
2147 {
2148 return Fail("%s: Operation has invalid inputs", __func__);
2149 }
2150
2151 // The FuseActivation parameter is always the input index 2
2152 // and it should be optional
2153 ActivationFn activationFunction;
2154 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2155 {
2156 return Fail("%s: Operation has invalid inputs", __func__);
2157 }
2158
2159 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2160 if (!output)
2161 {
2162 return Fail("%s: Could not read output 0", __func__);
2163 }
2164
2165 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2166 if (IsDynamicTensor(outputInfo))
2167 {
2168 return Fail("%s: Dynamic output tensors are not supported", __func__);
2169 }
2170
2171 bool isSupported = false;
2172 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2173 IsDivisionSupported,
2174 data.m_Backends,
2175 isSupported,
2176 input0.GetTensorInfo(),
2177 input1.GetTensorInfo(),
2178 outputInfo);
2179 if (!isSupported)
2180 {
2181 return false;
2182 }
2183
2184 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2185 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2186
2187 if (endLayer)
2188 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002189 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2190 if (!isReshapeSupported)
2191 {
2192 return false;
2193 }
2194
Mike Kelly46272802019-08-14 17:00:48 +01002195 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2196 }
2197 return Fail("%s: ProcessActivation failed", __func__);
2198}
2199
2200template<typename HalPolicy,
2201 typename Operation = typename HalPolicy::Operation,
2202 typename Model = typename HalPolicy::Model>
2203bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2204{
2205 using Operand = typename HalPolicy::Operand;
2206
2207 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2208 if (!input.IsValid())
2209 {
2210 return Fail("%s: Operation has invalid inputs", __func__);
2211 }
2212
2213 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2214 if (!outputOperand)
2215 {
2216 return Fail("%s: Operation has invalid outputs", __func__);
2217 }
2218
2219 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2220 if (IsDynamicTensor(outputInfo))
2221 {
2222 return Fail("%s: Dynamic output tensors are not supported", __func__);
2223 }
2224
2225 bool isSupported = false;
2226 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2227 IsFloorSupported,
2228 data.m_Backends,
2229 isSupported,
2230 input.GetTensorInfo(),
2231 outputInfo);
2232 if (!isSupported)
2233 {
2234 return false;
2235 }
2236
2237 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2238 assert(layer != nullptr);
2239 input.Connect(layer->GetInputSlot(0));
2240
2241 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2242}
2243
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
/// Converts an Android NN FULLY_CONNECTED operation into an armnn FullyConnected layer.
/// Inputs: 0 = data tensor, 1 = constant 2D weights, 2 = constant 1D bias,
/// 3 = fused activation. Inputs of rank > 2 are flattened to 2D via a reshape layer
/// before being fed into the fully connected layer.
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2D shape the input must be flattened to so it matches the weights;
    // FlattenFullyConnectedInput throws if the shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // Inputs of rank > 2 go through an intermediate reshape layer down to the
        // flattened 2D shape computed above; rank <= 2 inputs connect directly.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2350
2351template<typename HalPolicy,
2352 typename Operation = typename HalPolicy::Operation,
2353 typename Model = typename HalPolicy::Model>
2354bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2355{
Mike Kelly999e2092019-08-15 10:46:46 +01002356 if (operation.inputs.size() != 1)
2357 {
2358 return Fail("%s: Optional inputs are not supported", __func__);
2359 }
2360
Mike Kelly46272802019-08-14 17:00:48 +01002361 using Operand = typename HalPolicy::Operand;
2362
2363 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2364 if (!input.IsValid())
2365 {
2366 return Fail("%s: Operation has invalid inputs", __func__);
2367 }
2368
2369 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2370 if (!output)
2371 {
2372 return Fail("%s: Could not read output 0", __func__);
2373 }
2374
2375 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2376 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2377
2378 if (IsDynamicTensor(outputInfo))
2379 {
2380 return Fail("%s: Dynamic output tensors are not supported", __func__);
2381 }
2382 if (outputInfo.GetNumDimensions() != 4u)
2383 {
2384 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2385 }
2386
2387 armnn::L2NormalizationDescriptor desc;
2388 desc.m_DataLayout = armnn::DataLayout::NHWC;
2389
2390 bool isSupported = false;
2391 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2392 IsL2NormalizationSupported,
2393 data.m_Backends,
2394 isSupported,
2395 inputInfo,
2396 outputInfo,
2397 desc);
2398 if (!isSupported)
2399 {
2400 return false;
2401 }
2402
2403 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2404 assert(layer != nullptr);
2405 input.Connect(layer->GetInputSlot(0));
2406
2407 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2408}
2409
2410template<typename HalPolicy,
2411 typename Operation = typename HalPolicy::Operation,
2412 typename Model = typename HalPolicy::Model>
2413bool ConvertLocalResponseNormalization(const Operation& operation,
2414 const Model& model,
2415 ConversionData& data)
2416{
Mike Kelly999e2092019-08-15 10:46:46 +01002417 if (operation.inputs.size() != 5)
2418 {
2419 return Fail("%s: Optional inputs are not supported", __func__);
2420 }
2421
Mike Kelly46272802019-08-14 17:00:48 +01002422 using Operand = typename HalPolicy::Operand;
2423 using OperandType = typename HalPolicy::OperandType;
2424
2425 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2426 if (!input.IsValid())
2427 {
2428 return Fail("%s: Operation has invalid inputs", __func__);
2429 }
2430
2431 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2432 if (!output)
2433 {
2434 return Fail("%s: Could not read output 0", __func__);
2435 }
2436
2437 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2438 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2439
2440 if (IsDynamicTensor(outputInfo))
2441 {
2442 return Fail("%s: Dynamic output tensors are not supported", __func__);
2443 }
2444 if (outputInfo.GetNumDimensions() != 4u)
2445 {
2446 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2447 }
2448
2449 armnn::NormalizationDescriptor descriptor;
2450 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2451 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2452 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2453
2454 if (!input.IsValid() ||
2455 !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2456 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2457 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2458 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2459 {
2460 return Fail("%s: Operation has invalid inputs", __func__);
2461 }
2462
2463 // ArmNN expects normSize to be the full size of the normalization
2464 // window rather than the radius as in AndroidNN.
2465 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2466
2467 bool isSupported = false;
2468 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2469 IsNormalizationSupported,
2470 data.m_Backends,
2471 isSupported,
2472 inputInfo,
2473 outputInfo,
2474 descriptor);
2475 if (!isSupported)
2476 {
2477 return false;
2478 }
2479
2480
2481 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2482 assert(layer != nullptr);
2483 input.Connect(layer->GetInputSlot(0));
2484
2485 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2486}
2487
2488template<typename HalPolicy,
2489 typename Operation = typename HalPolicy::Operation,
2490 typename Model = typename HalPolicy::Model>
2491bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2492{
2493 using Operand = typename HalPolicy::Operand;
2494
2495 armnn::ActivationDescriptor desc;
2496 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2497
2498 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2499}
2500
2501template<typename HalPolicy,
2502 typename Operation = typename HalPolicy::Operation,
2503 typename Model = typename HalPolicy::Model>
2504bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
2505{
2506 using Operand = typename HalPolicy::Operand;
2507
2508 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2509 if (!input.IsValid())
2510 {
2511 return Fail("%s: Operation has invalid inputs", __func__);
2512 }
2513
2514 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2515 if (!output)
2516 {
2517 return Fail("%s: Could not read output 0", __func__);
2518 }
2519
2520 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2521 if (IsDynamicTensor(outputInfo))
2522 {
2523 return Fail("%s: Dynamic output tensors are not supported", __func__);
2524 }
2525
2526 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2527 if (!axisOperand)
2528 {
2529 return Fail("%s: Could not read input 1", __func__);
2530 }
2531
2532 std::vector<int32_t> axis;
2533 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
2534 {
2535 return Fail("%s: Input 1 has invalid values", __func__);
2536 }
2537
2538 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2539
2540 // Convert the axis to unsigned int and remove duplicates.
2541 unsigned int rank = inputInfo.GetNumDimensions();
2542 std::set<unsigned int> uniqueAxis;
2543 std::transform(axis.begin(), axis.end(),
2544 std::inserter(uniqueAxis, uniqueAxis.begin()),
2545 [rank](int i) -> unsigned int { return (i + rank) % rank; });
2546
2547 // Get the "keep dims" flag.
2548 int32_t keepDims = 0;
2549 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
2550 {
2551 return Fail("%s: Could not read input 2", __func__);
2552 }
2553
2554 armnn::MeanDescriptor descriptor;
2555 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2556 descriptor.m_KeepDims = keepDims > 0;
2557
2558 bool isSupported = false;
2559 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2560 IsMeanSupported,
2561 data.m_Backends,
2562 isSupported,
2563 inputInfo,
2564 outputInfo,
2565 descriptor);
2566 if (!isSupported)
2567 {
2568 return false;
2569 }
2570
2571 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
2572 assert(layer != nullptr);
2573 input.Connect(layer->GetInputSlot(0));
2574
2575 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2576}
2577
2578template<typename HalPolicy,
2579 typename Operation = typename HalPolicy::Operation,
2580 typename Model = typename HalPolicy::Model>
2581bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2582{
2583 using Operand = typename HalPolicy::Operand;
2584
2585 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2586 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2587
2588 if (!input0.IsValid() || !input1.IsValid())
2589 {
2590 return Fail("%s: Operation has invalid inputs", __func__);
2591 }
2592
2593 // The FuseActivation parameter is always the input index 2
2594 // and it should be optional
2595 ActivationFn activationFunction;
2596 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2597 {
2598 return Fail("%s: Operation has invalid inputs", __func__);
2599 }
2600
2601 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2602
2603 if (outputOperand == nullptr)
2604 {
2605 return false;
2606 }
2607
2608 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2609 if (IsDynamicTensor(outputInfo))
2610 {
2611 return Fail("%s: Dynamic output tensors are not supported", __func__);
2612 }
2613
2614 bool isSupported = false;
2615 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2616 IsMultiplicationSupported,
2617 data.m_Backends,
2618 isSupported,
2619 input0.GetTensorInfo(),
2620 input1.GetTensorInfo(),
2621 outputInfo);
2622 if (!isSupported)
2623 {
2624 return false;
2625 }
2626
2627 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2628 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2629
2630 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2631 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2632
2633 if (endLayer != nullptr)
2634 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002635 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2636 if (!isReshapeSupported)
2637 {
2638 return false;
2639 }
2640
Mike Kelly46272802019-08-14 17:00:48 +01002641 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2642 }
2643 else
2644 {
2645 return Fail("%s: ProcessActivation failed", __func__);
2646 }
2647}
2648
2649template<typename HalPolicy,
2650 typename Operation = typename HalPolicy::Operation,
2651 typename Model = typename HalPolicy::Model>
2652bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
2653{
2654 using Operand = typename HalPolicy::Operand;
2655
Mike Kelly3c673942019-07-25 09:26:06 +01002656 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2657 if (!input.IsValid())
2658 {
2659 return Fail("%s: Operation has invalid inputs", __func__);
2660 }
2661
2662 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2663 unsigned int rank = inputInfo.GetNumDimensions();
2664
2665 armnn::PadDescriptor descriptor;
2666 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2667 {
2668 return Fail("%s: Could not convert paddings", __func__);
2669 }
2670
2671 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2672 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2673 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2674 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2675 {
2676 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2677 }
2678
Mike Kelly46272802019-08-14 17:00:48 +01002679 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002680 if (!output)
2681 {
2682 return Fail("%s: Could not read output", __func__);
2683 }
2684
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002685 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002686 if (IsDynamicTensor(outputInfo))
2687 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002688 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002689 }
2690
2691 bool isSupported = false;
2692 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2693 IsPadSupported,
2694 data.m_Backends,
2695 isSupported,
2696 inputInfo,
2697 outputInfo,
2698 descriptor);
2699 if (!isSupported)
2700 {
2701 return false;
2702 }
2703
2704 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2705 assert(layer != nullptr);
2706 input.Connect(layer->GetInputSlot(0));
2707 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2708
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002709 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002710}
2711
Mike Kelly0a879362019-07-29 16:56:31 +01002712template<typename HalPolicy,
2713 typename Operation = typename HalPolicy::Operation,
Mike Kelly46272802019-08-14 17:00:48 +01002714 typename Model = typename HalPolicy::Model>
2715bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
2716{
2717 using Operand = typename HalPolicy::Operand;
2718
2719 const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
2720 const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2721 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2722
2723 if (inputOperand == nullptr
2724 || requestedShapeOperand == nullptr
2725 || outputOperand == nullptr)
2726 {
2727 return Fail("%s: Operation has invalid inputs", __func__);
2728 }
2729
2730 if (requestedShapeOperand->dimensions.size() != 1)
2731 {
2732 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
2733 __func__, requestedShapeOperand->dimensions.size());
2734 }
2735
2736 std::vector<int32_t> targetDimensions;
2737 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
2738 {
2739 return Fail("%s: Could not read values of input 1", __func__);
2740 }
2741
2742 const Shape inputOperandShape = GetOperandShape(*inputOperand);
2743
2744 Shape requestedShape;
2745 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
2746 // function that resolves these values into a fully specified tensor shape.
2747 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
2748 {
2749 return Fail("%s: Failed to resolve the requested shape", __func__);
2750 }
2751
2752 const Shape outputOperandShape = GetOperandShape(*outputOperand);
2753 if (!SameShape(requestedShape, outputOperandShape))
2754 {
2755 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
2756 }
2757
2758 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2759 if (!input.IsValid())
2760 {
2761 return Fail("%s: Could not read input 0", __func__);
2762 }
2763
2764 armnn::ReshapeDescriptor reshapeDescriptor;
2765 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
2766 requestedShape.dimensions.data());
2767
2768 bool isSupported = false;
2769 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2770 IsReshapeSupported,
2771 data.m_Backends,
2772 isSupported,
2773 input.GetTensorInfo(),
2774 reshapeDescriptor);
2775 if (!isSupported)
2776 {
2777 return false;
2778 }
2779
2780 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2781 assert(layer != nullptr);
2782 input.Connect(layer->GetInputSlot(0));
2783
2784 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2785}
2786
2787template<typename HalPolicy,
2788 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002789 typename Model = typename HalPolicy::Model>
2790bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2791{
Mike Kelly46272802019-08-14 17:00:48 +01002792 using Operand = typename HalPolicy::Operand;
2793
Mike Kelly0a879362019-07-29 16:56:31 +01002794 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2795 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2796
2797 if (!input0.IsValid() || !input1.IsValid())
2798 {
2799 return Fail("%s: Operation has invalid inputs", __func__);
2800 }
2801
2802 // The FuseActivation parameter is always the input index 2
2803 // and it should be optional
2804 ActivationFn activationFunction;
2805 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2806 {
2807 return Fail("%s: Operation has invalid inputs", __func__);
2808 }
2809
2810 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2811 if (!output)
2812 {
2813 return Fail("%s: Could not read output 0", __func__);
2814 }
2815
2816 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2817 if (IsDynamicTensor(outputInfo))
2818 {
2819 return Fail("%s: Dynamic output tensors are not supported", __func__);
2820 }
2821
2822 bool isSupported = false;
2823 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2824 IsSubtractionSupported,
2825 data.m_Backends,
2826 isSupported,
2827 input0.GetTensorInfo(),
2828 input1.GetTensorInfo(),
2829 outputInfo);
2830 if (!isSupported)
2831 {
2832 return false;
2833 }
2834
2835 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2836 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2837
2838 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2839 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2840
2841 if (endLayer)
2842 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002843 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2844 if (!isReshapeSupported)
2845 {
2846 return false;
2847 }
Mike Kelly0a879362019-07-29 16:56:31 +01002848 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2849 }
2850
2851 return Fail("%s: ProcessActivation failed", __func__);
2852}
2853
Finn Williams23b87b32019-07-30 11:44:05 +01002854template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002855 typename Operation = typename HalPolicy::Operation,
2856 typename Model = typename HalPolicy::Model>
2857bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2858{
2859 using Operand = typename HalPolicy::Operand;
2860
2861 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2862 if (!input.IsValid())
2863 {
2864 return Fail("%s: Operation has invalid inputs", __func__);
2865 }
2866
2867 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2868 unsigned int rank = inputInfo.GetNumDimensions();
2869 if (rank > 4)
2870 {
2871 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2872 }
2873
2874 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2875 if (!output)
2876 {
2877 return Fail("%s: Could not read output 0", __func__);
2878 }
2879
2880 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
2881 {
2882 return Fail("%s: Dynamic output tensors are not supported", __func__);
2883 }
2884
2885 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
2886 // if the operand index is out of bounds.
2887 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2888
2889 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2890
2891 std::vector<int32_t> axis;
2892 if (!axisOperand)
2893 {
2894 axis.assign(dimensionSequence,
2895 dimensionSequence + rank);
2896 }
2897 else
2898 {
2899 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
2900 }
2901
2902 std::vector<uint32_t> outputDims;
2903 for (unsigned int i = 0; i < rank; i++)
2904 {
2905 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
2906 auto currentDimension = inputInfo.GetShape()[i];
2907 if (skipSqueeze || currentDimension != 1)
2908 {
2909 outputDims.push_back(currentDimension);
2910 }
2911 }
2912
2913 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
2914
2915 armnn::TensorInfo outputInfo = inputInfo;
2916 outputInfo.SetShape(outShape);
2917
2918 armnn::ReshapeDescriptor reshapeDesc;
2919 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2920
2921 bool isSupported = false;
2922 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2923 IsReshapeSupported,
2924 data.m_Backends,
2925 isSupported,
2926 inputInfo,
2927 reshapeDesc);
2928 if (!isSupported)
2929 {
2930 return false;
2931 }
2932
2933 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
2934 assert(layer != nullptr);
2935 input.Connect(layer->GetInputSlot(0));
2936
2937 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2938}
2939
2940template<typename HalPolicy,
2941 typename Operation = typename HalPolicy::Operation,
2942 typename Model = typename HalPolicy::Model>
2943bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2944{
2945 using Operand = typename HalPolicy::Operand;
2946
2947 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2948 if (!input.IsValid())
2949 {
2950 return Fail("%s: Operation has invalid inputs", __func__);
2951 }
2952
2953 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2954 unsigned int rank = inputInfo.GetNumDimensions();
2955 if (rank > 4)
2956 {
2957 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2958 }
2959
2960 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2961 if (!output)
2962 {
2963 return Fail("%s: Could not read output 0", __func__);
2964 }
2965
2966 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2967 if (IsDynamicTensor(outputInfo))
2968 {
2969 return Fail("%s: Dynamic output tensors are not supported", __func__);
2970 }
2971
2972 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2973 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2974 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
2975
2976 std::vector<int32_t> beginValues;
2977 std::vector<int32_t> endValues;
2978 std::vector<int32_t> stridesValues;
2979
2980 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
2981 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
2982 {
2983 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
2984 {
2985 return false;
2986 }
2987
2988 if (operandValues.size() != rank)
2989 {
2990 return false;
2991 }
2992
2993 return true;
2994 };
2995
2996 if (!ValidateInputOperands(*beginOperand, beginValues)
2997 || !ValidateInputOperands(*endOperand, endValues)
2998 || !ValidateInputOperands(*stridesOperand, stridesValues))
2999 {
3000 return Fail("%s: Operation has invalid input operand", __func__);
3001 }
3002
3003 // Stride cannot have value '0'
3004 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3005 {
3006 return Fail("%s: Stride must be non-zero value.", __func__);
3007 }
3008
3009 armnn::StridedSliceDescriptor descriptor;
3010 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3011 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3012 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3013 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3014
3015 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3016 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3017 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3018 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3019 {
3020 return Fail("%s: Operation has invalid inputs", __func__);
3021 }
3022
3023 bool isSupported = false;
3024 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3025 IsStridedSliceSupported,
3026 data.m_Backends,
3027 isSupported,
3028 inputInfo,
3029 outputInfo,
3030 descriptor);
3031 if (!isSupported)
3032 {
3033 return false;
3034 }
3035
3036 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3037 assert(layer != nullptr);
3038 input.Connect(layer->GetInputSlot(0));
3039
3040 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3041}
3042
3043template<typename HalPolicy,
3044 typename Operation = typename HalPolicy::Operation,
3045 typename Model = typename HalPolicy::Model>
3046bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
3047{
3048 using Operand = typename HalPolicy::Operand;
3049
3050 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3051 if (!input.IsValid())
3052 {
3053 return Fail("%s: Operation has invalid inputs", __func__);
3054 }
3055
3056 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3057 unsigned int rank = inputInfo.GetNumDimensions();
3058 if (rank > 4)
3059 {
3060 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3061 }
3062
3063 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3064 // if the operand index is out of bounds.
3065 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3066
3067 std::vector<int32_t> perm(rank);
3068 if (!permOperand)
3069 {
3070 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3071 for (unsigned int i = rank; i > 0; i--)
3072 {
3073 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3074 }
3075 }
3076 else
3077 {
3078 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3079 }
3080
3081 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3082
3083 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3084 if (!permutationVector.IsEqual(NHWCToArmNN)
3085 && !permutationVector.IsEqual(ArmNNToNHWC)
3086 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3087 {
3088 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3089 }
3090
3091 armnn::PermuteDescriptor permuteDesc;
3092 permuteDesc.m_DimMappings = permutationVector;
3093
3094 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3095 if (!output)
3096 {
3097 return Fail("%s: Could not read output 0", __func__);
3098 }
3099
3100 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3101
3102 bool isSupported = false;
3103 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3104 IsPermuteSupported,
3105 data.m_Backends,
3106 isSupported,
3107 inputInfo,
3108 outputInfo,
3109 permuteDesc);
3110 if (!isSupported)
3111 {
3112 return false;
3113 }
3114
3115 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3116 assert(layer != nullptr);
3117 input.Connect(layer->GetInputSlot(0));
3118
3119 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3120}
3121
3122template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003123 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003124 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003125 typename HalModel = typename HalPolicy::Model>
3126bool ConvertBatchToSpaceNd(const HalOperation& operation,
3127 const HalModel& model,
3128 ConversionData& data)
3129{
Finn Williams23b87b32019-07-30 11:44:05 +01003130
3131 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3132 if (!input.IsValid())
3133 {
3134 return Fail("%s: Operation has invalid inputs", __func__);
3135 }
3136
3137 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3138 if (!output)
3139 {
3140 return Fail("%s: Could not read output 0", __func__);
3141 }
3142
3143 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3144 if (IsDynamicTensor(outputInfo))
3145 {
3146 return Fail("%s: Dynamic output tensors are not supported", __func__);
3147 }
3148
3149 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3150 if (!blockOperand)
3151 {
3152 return Fail("%s: Could not read input 1", __func__);
3153 }
3154
3155 // Convert the block operand to int32
3156 std::vector<int32_t> block;
3157 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3158 {
3159 return Fail("%s: Input 1 has invalid values", __func__);
3160 }
3161
3162 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3163
3164 unsigned int rank = inputInfo.GetNumDimensions();
3165 if (rank != 4)
3166 {
3167 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3168 }
3169
3170 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3171 {
3172 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3173 " greater than or equal to 1", __func__);
3174 }
3175
3176 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3177 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3178 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3179
3180 if (Is12Operand(*output))
3181 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003182 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003183 }
3184 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3185 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3186
3187 bool isSupported = false;
3188 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3189 IsBatchToSpaceNdSupported,
3190 data.m_Backends,
3191 isSupported,
3192 inputInfo,
3193 outputInfo,
3194 batchToSpaceNdDesc);
3195 if (!isSupported)
3196 {
3197 return false;
3198 }
3199
3200 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3201 assert(layer != nullptr);
3202 input.Connect(layer->GetInputSlot(0));
3203
3204 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3205}
Mike Kelly0a879362019-07-29 16:56:31 +01003206
Finn Williamsd74c5052019-07-30 17:06:00 +01003207template<typename HalPolicy,
3208 typename HalOperation = typename HalPolicy::Operation,
3209 typename HalOperand = typename HalPolicy::Operand,
3210 typename HalModel = typename HalPolicy::Model>
3211bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3212{
3213 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3214 if (!input.IsValid())
3215 {
3216 return Fail("%s: Operation has invalid inputs", __func__);
3217 }
3218
3219 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3220 unsigned int rank = inputInfo.GetNumDimensions();
3221 unsigned int spatialDim = rank - 2;
3222
3223 if (rank != 4)
3224 {
3225 Fail("%s: Only inputs with rank 4 are supported", __func__);
3226 }
3227
3228 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3229 if (!output)
3230 {
3231 return Fail("%s: Could not read output 0", __func__);
3232 }
3233
3234 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3235 if (IsDynamicTensor(outputInfo))
3236 {
3237 return Fail("%s: Dynamic output tensors are not supported", __func__);
3238 }
3239
3240 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3241 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3242
3243 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3244 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3245 {
3246 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3247 }
3248
3249 std::vector<int32_t> blockShape;
3250 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3251 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3252 {
3253 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3254 }
3255
3256 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3257 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3258 {
3259 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3260 }
3261
3262 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3263 std::vector<int32_t> paddings;
3264 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3265 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3266 {
3267 int paddingBeforeInput = paddings[i];
3268 int paddingAfterInput = paddings[i + 1];
3269 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3270 {
3271 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3272 }
3273
3274 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3275 }
3276
3277 armnn::SpaceToBatchNdDescriptor descriptor;
3278 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3279 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3280 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3281
3282 if (Is12Operand(*output))
3283 {
3284 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3285 }
3286
3287 bool isSupported = false;
3288 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3289 IsSpaceToBatchNdSupported,
3290 data.m_Backends,
3291 isSupported,
3292 inputInfo,
3293 outputInfo,
3294 descriptor);
3295 if (!isSupported)
3296 {
3297 return false;
3298 }
3299
3300 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3301 assert(layer != nullptr);
3302 input.Connect(layer->GetInputSlot(0));
3303
3304 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3305}
3306
Kevin May407718f2019-09-09 14:46:41 +01003307template<typename HalPolicy,
3308 typename HalOperation = typename HalPolicy::Operation,
3309 typename HalModel = typename HalPolicy::Model>
3310bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3311{
3312 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3313
3314 if (!input.IsValid())
3315 {
3316 return Fail("%s: Operation has invalid input", __func__);
3317 }
3318
3319 using HalOperand = typename HalPolicy::Operand;
3320 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3321 if (!output)
3322 {
3323 return Fail("%s: Could not read output 0", __func__);
3324 }
3325
3326 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3327 if (IsDynamicTensor(outputInfo))
3328 {
3329 return Fail("%s: Dynamic output tensors are not supported", __func__);
3330 }
3331
3332 bool isSupported = false;
3333 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3334 IsAbsSupported,
3335 data.m_Backends,
3336 isSupported,
3337 input.GetTensorInfo(),
3338 outputInfo);
3339
3340 if (!isSupported)
3341 {
3342 return false;
3343 }
3344
3345 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3346 assert(layer != nullptr);
3347 input.Connect(layer->GetInputSlot(0));
3348
3349 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3350}
3351
3352
saoste01b8471482018-10-10 09:44:51 +01003353} // namespace armnn_driver