blob: 683da5e44ac799c21dd9e179fdcc8844b288e105 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Holds the state shared by all operation converters while an Android NN
// model is being converted into an armnn::INetwork.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends to query for layer support, in the order supplied by the caller.
    const std::vector<armnn::BackendId> m_Backends;
    // The network being built up during conversion.
    armnn::INetworkPtr m_Network;
    // Output slot producing each operand's value -- presumably indexed by
    // operand index; confirm against the converter code that populates it.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operands
    // (read via GetOperandValueReadOnlyAddress).
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps an armnn::IOutputSlot together with its tensor info so that the value
// feeding an operation's input can be validated and connected uniformly.
class LayerInputHandle
{
public:
    // Default-constructs a handle (presumably invalid; see IsValid).
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when the handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Owns (or references) the data of a constant tensor and exposes it as an
// armnn::ConstTensor, e.g. for weights and biases.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data (see m_SwizzledTensorData).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Movable but not copyable (the pin may own the swizzled data buffer).
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
116// Convenience function to log the reason for failing to convert a model.
117// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
// Logs (at debug level) the reason an operation or model could not be
// converted. Always returns false, so call sites can simply write:
//     return Fail("%s: ...", __func__, ...);
template<typename... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Queries each backend in 'backends' in order and stops at the first one whose
// ILayerSupport object reports the layer as supported; 'supported' must be an
// lvalue bool and receives the final result.
// NOTE(review): the macro expands to multiple statements and declares
// 'reasonIfUnsupported' in the enclosing scope, so use it at most once per
// scope and never as the unbraced body of an if/else.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
    std::string reasonIfUnsupported; \
    try { \
        for (auto&& backendId : backends) \
        { \
            auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
            if (layerSupportObject) \
            { \
                supported = \
                    layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
                if (supported) \
                { \
                    break; \
                } \
                else \
                { \
                    if (reasonIfUnsupported.size() > 0) \
                    { \
                        ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                    } \
                    else \
                    { \
                        ALOGD("%s: not supported by armnn", funcName); \
                    } \
                } \
            } \
            else \
            { \
                ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
            } \
        } \
        if (!supported) \
        { \
            ALOGD("%s: not supported by any specified backend", funcName); \
        } \
    } catch (const armnn::InvalidArgumentException &e) { \
        throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    }
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100165
Mike Kellyb5fdf382019-06-11 16:35:25 +0100166template<typename Operand>
167armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100168{
169 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
170}
171
Matthew Bentham912b3622019-05-03 15:49:14 +0100172inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100173{
Matthew Bentham912b3622019-05-03 15:49:14 +0100174 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
175 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
176 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100177}
178
#ifdef ARMNN_ANDROID_NN_V1_2

// NN HAL 1.2 overload: additionally accepts the types introduced in 1.2
// (BOOL, TENSOR_FLOAT16, TENSOR_QUANT16_SYMM).
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
192
// NN HAL 1.0 overload: 1.0 operands are never BOOL
// (the type only exists from HAL 1.2 -- see the V1_2 overload below).
inline bool IsBool(V1_0::Operand)
{
    return false;
}
197
// NN HAL 1.0 overload: a 1.0 operand is never a 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
202
#ifdef ARMNN_ANDROID_NN_V1_2

// NN HAL 1.2 overload: reports whether the operand is of BOOL type.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
217
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100218template<typename LayerHandleType>
219armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
220 armnn::TensorInfo reshapeInfo)
221{
222 armnn::ReshapeDescriptor reshapeDescriptor;
223 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
224
225 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
226 BOOST_ASSERT(reshapeLayer != nullptr);
227
228 // Attach the input layer to the reshape layer
229 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
230 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
231
232 return *reshapeLayer;
233}
234
// Connects 'input0' and 'input1' to the two input slots of 'startLayer',
// inserting a reshape in front of the lower-rank input when the ranks differ
// so that both inputs have the same number of dimensions (NumPy-style
// broadcasting by prepending degenerate dimensions).
// @return false only if the required reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller shape into the tail of an all-ones shape of maximal rank,
    // e.g. [H, W] -> [1, 1, H, W] when the other input is 4D.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
324
325void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
326 android::nn::PaddingScheme scheme)
327{
328 int32_t padHead;
329 int32_t padTail;
330 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
331 outPadHead = boost::numeric_cast<uint32_t>(padHead);
332 outPadTail = boost::numeric_cast<uint32_t>(padTail);
333}
334
#ifdef ARMNN_ANDROID_NN_V1_2

// As CalcPadding above, but also accounts for kernel dilation
// (used by NN HAL 1.2 convolutions).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Computes explicit padding for a transpose convolution from its *output*
// size; outputs are int32_t (unlike CalcPadding) -- presumably because the
// computed padding can be negative here.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
354
Matthew Bentham912b3622019-05-03 15:49:14 +0100355Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100356{
357 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100358 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100359 shape.dimensions = operand.dimensions;
360 shape.scale = operand.scale;
361 shape.offset = operand.zeroPoint;
362 return shape;
363}
364
#ifdef ARMNN_ANDROID_NN_V1_2

// NN HAL 1.2 overload of GetOperandShape.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale = operand.scale;
    result.offset = operand.zeroPoint;
    return result;
}

#endif
378
arovir01b0717b52018-09-05 17:03:25 +0100379// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
380// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
Aron Virginas-Tara0baa172019-08-01 11:24:08 +0100381// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
382// user (us, in this case) to ensure they match.
arovir01b0717b52018-09-05 17:03:25 +0100383void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
384 const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
385{
386 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
387 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
388 {
389 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
390 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
391 {
392 ALOGW("Bias quantization scale has been modified to match input*weights");
393 biasInfo.SetQuantizationScale(expectedBiasScale);
394 }
395 }
396}
397
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 of a 4D tensor (see CreateConcatPermutationParameters).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
408
409template<typename OSlot>
410armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
411 const armnn::PermutationVector& mappings)
412{
413 // Add swizzle layer
414 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
415
416 BOOST_ASSERT(layer != nullptr);
417
418 // Connect input to swizzle layer
419 input.Connect(layer->GetInputSlot(0));
420
421 // Setup swizzled output
422 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
423 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
424
425 return *layer;
426}
427
428void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
429{
430 // Add swizzle layer
431 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
432 // Connect swizzled input to layer
433 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
434}
435
436armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
437{
438 // Add deswizzle layer
439 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
440 return deswizzleLayer;
441}
442
443// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
444armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
445 LayerInputHandle& input,
446 armnn::IConnectableLayer& firstLayer,
447 armnn::IConnectableLayer& lastLayer)
448{
449 SwizzleIn(network, input, firstLayer, 0);
450 return DeswizzleOut(network, lastLayer, 0);
451}
452
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Convenience overload where a single layer is both first and last.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
459
460bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
461 const armnn::TensorShape & outputShape,
462 uint32_t concatDim)
463{
464 // Validate the output shape is correct given the input shapes (which have just been validated)
465 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
466 if (outputShape.GetNumDimensions() != numDimensions)
467 {
468 return Fail("%s: Output shape has wrong number of dimensions", __func__);
469 }
470
471 unsigned int outputSizeAlongConcatenatedDimension = 0;
472 for (unsigned int i = 0; i < inputShapes.size(); i++)
473 {
474 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
475 }
476
477 for (unsigned int i = 0; i < numDimensions; ++i)
478 {
479 if (i == concatDim)
480 {
481 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
482 {
483 return Fail(
484 "%s: Invalid output shape for dimension %d (%d != %d)",
485 __func__,
486 i,
487 outputShape[i],
488 outputSizeAlongConcatenatedDimension);
489 }
490 }
491 else
492 {
493 if (outputShape[i] != inputShapes[0][i])
494 {
495 return Fail("%s: Invalid output shape", __func__);
496 }
497 }
498 }
499
500 return true;
501}
502
503bool RequiresReshape(armnn::TensorShape & inputShape)
504{
505 return inputShape.GetNumDimensions() < 3;
506}
507
arovir01b0717b52018-09-05 17:03:25 +0100508void SwizzleInputs(armnn::INetwork& network,
509 std::vector<LayerInputHandle>& inputs,
510 std::vector<armnn::TensorShape>& inputShapes,
511 const armnn::PermutationVector& mapping)
512{
513 if (!mapping.IsEqual(IdentityPermutation4D))
514 {
515 size_t nInputs = inputs.size();
516 for (size_t i=0; i<nInputs; ++i)
517 {
518 // add swizzle layer
519 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
520 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
521 auto& outputInfo = outputSlot.GetTensorInfo();
522 // replace inputs with the swizzled ones
523 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
524 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
525 }
526 }
527}
528
narpra01f176d5a2018-11-18 20:17:48 +0000529bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
530 int32_t & concatDimension,
531 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100532{
narpra01f176d5a2018-11-18 20:17:48 +0000533 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100534 BOOST_ASSERT(numberOfDimensions >= 3);
535
536 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000537 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
538 // or along dimension 0 or 2 for a 3-D tensor.
539 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100540 {
narpra01f176d5a2018-11-18 20:17:48 +0000541 concatDimension = 1;
542 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
543 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100544 }
narpra01f176d5a2018-11-18 20:17:48 +0000545 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100546 {
narpra01f176d5a2018-11-18 20:17:48 +0000547 concatDimension = 0;
548 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
549 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100550 }
narpra01f176d5a2018-11-18 20:17:48 +0000551 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100552}
553
554} // anonymous namespace
555
556namespace armnn_driver
557{
558
//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed in AndroidNN activation function requires so.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
569
570} // namespace armnn_driver
571
572///
573/// Utility templates
574///
575
576namespace armnn_driver
577{
578
579using namespace android::nn;
580
// Returns a pointer to input 'inputIndex' of 'operation', or nullptr when the
// index is out of range (logging a failure unless failOnIndexOutOfBounds is
// false, e.g. for probing optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // Cast keeps the variadic argument in agreement with the format string:
            // size() returns a size_t, which must not be passed for %i/%u.
            Fail("%s: invalid input index: %u out of %u",
                 __func__, inputIndex, static_cast<uint32_t>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
602
// Returns a pointer to output 'outputIndex' of 'operation', or nullptr
// (after logging) when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // Cast keeps the variadic argument in agreement with the format string:
        // size() returns a size_t, which must not be passed for %i/%u.
        Fail("%s: invalid output index: %u out of %u",
             __func__, outputIndex, static_cast<uint32_t>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
622
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100623template<typename HalPolicy,
624 typename HalOperand = typename HalPolicy::Operand,
625 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100626const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100627 const HalModel& model,
628 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000629 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100630{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100631 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100632
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100633 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100634 switch (operand.lifetime)
635 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100636 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100637 {
638 // Constant found in model.operandValues
639 valueStart = &model.operandValues[operand.location.offset];
640 break;
641 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100642 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100643 {
644 // Constant specified via a Memory object
645 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
646 break;
647 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100648 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000649 {
650 // An optional input tensor with no values is not an error so should not register as a fail
651 if (optional)
652 {
653 valueStart = nullptr;
654 break;
655 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100656 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000657 }
arovir01b0717b52018-09-05 17:03:25 +0100658 default:
659 {
660 // Unsupported/invalid (e.g. can't get value of an input to the model)
661 Fail("%s: unsupported/invalid operand lifetime: %s",
662 __func__, toString(operand.lifetime).c_str());
663 valueStart = nullptr;
664 }
665 }
666
667 return valueStart;
668}
669
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100670template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100671 typename HalOperation = typename HalPolicy::Operation,
672 typename HalModel = typename HalPolicy::Model,
673 typename HalOperandType = typename HalPolicy::OperandType>
674bool GetOperandType(const HalOperation& operation,
675 uint32_t inputIndex,
676 const HalModel& model,
677 HalOperandType& type)
678{
679 using HalOperand = typename HalPolicy::Operand;
680
681 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
682 if (!operand)
683 {
684 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
685 }
686
687 type = operand->type;
688 return true;
689}
690
// Converts a constant HAL operand into a ConstTensorPin.
// @param operand             Must be CONSTANT_COPY, CONSTANT_REFERENCE, or
//                            (when 'optional') NO_VALUE, with a tensor type.
// @param model / data        Used to locate the operand's backing storage.
// @param dimensionMappings   Permutation to apply to the tensor data.
// @param overrideTensorShape If non-null, replaces the shape derived from the operand.
// @param optional            When true, a missing value yields an invalid pin
//                            marked optional rather than an error.
// @return A valid ConstTensorPin, or an invalid one on failure (see ConstTensorPin::IsValid).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
738
739template<typename HalPolicy,
740 typename HalOperation = typename HalPolicy::Operation,
741 typename HalModel = typename HalPolicy::Model>
742ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
743 uint32_t inputIndex,
744 const HalModel& model,
745 const ConversionData& data,
746 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
747 const armnn::TensorShape* overrideTensorShape = nullptr,
748 bool optional = false)
749{
750 using HalOperand = typename HalPolicy::Operand;
751
752 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
753 if (!operand)
754 {
755 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
756 return ConstTensorPin();
757 }
758 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
759 model,
760 data,
761 dimensionMappings,
762 overrideTensorShape,
763 optional);
764}
765
766template<typename HalPolicy,
767 typename OutputType,
768 typename HalOperandType = typename HalPolicy::OperandType,
769 typename HalOperation = typename HalPolicy::Operation,
770 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100771bool GetInputScalar(const HalOperation& operation,
772 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100773 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100774 OutputType& outValue,
775 const HalModel& model,
776 const ConversionData& data)
777{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100778 using HalOperand = typename HalPolicy::Operand;
779
780 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100781 if (!operand)
782 {
783 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
784 }
785
786 if (operand->type != type)
787 {
788 return Fail("%s: unexpected operand type: %s (should be %s)",
789 __func__, toString(operand->type).c_str(), toString(type).c_str());
790 }
791
792 if (operand->location.length != sizeof(OutputType))
793 {
794 return Fail("%s: incorrect operand location length: %i (should be %i)",
795 __func__, operand->location.length, sizeof(OutputType));
796 }
797
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100798 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100799 if (!valueAddress)
800 {
801 return Fail("%s: failed to get address for operand", __func__);
802 }
803
804 outValue = *(static_cast<const OutputType*>(valueAddress));
805 return true;
806}
807
// Reads input 'inputIndex' of 'operation' as an INT32 scalar into 'outValue'.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}
819
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100820template<typename HalPolicy,
821 typename HalOperation = typename HalPolicy::Operation,
822 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100823bool GetInputFloat32(const HalOperation& operation,
824 uint32_t inputIndex,
825 float& outValue,
826 const HalModel& model,
827 const ConversionData& data)
828{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100829 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100830}
831
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100832template<typename HalPolicy,
833 typename HalOperation = typename HalPolicy::Operation,
834 typename HalOperandType = typename HalPolicy::OperandType,
835 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100836bool GetInputActivationFunctionImpl(const HalOperation& operation,
837 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100838 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100839 ActivationFn& outActivationFunction,
840 const HalModel& model,
841 const ConversionData& data)
842{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100843 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100844 {
845 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
846 __func__,
847 toString(type).c_str(),
848 toString(OperandType::INT32).c_str(),
849 toString(OperandType::TENSOR_INT32).c_str());
850 }
851
852 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100853 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100854 {
855 return Fail("%s: failed to get activation input value", __func__);
856 }
857 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
858 return true;
859}
860
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100861template<typename HalPolicy,
862 typename HalOperation = typename HalPolicy::Operation,
863 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100864bool GetInputActivationFunction(const HalOperation& operation,
865 uint32_t inputIndex,
866 ActivationFn& outActivationFunction,
867 const HalModel& model,
868 const ConversionData& data)
869{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100870 return GetInputActivationFunctionImpl<HalPolicy>(operation,
871 inputIndex,
872 HalPolicy::OperandType::INT32,
873 outActivationFunction,
874 model,
875 data);
arovir01b0717b52018-09-05 17:03:25 +0100876}
877
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100878template<typename HalPolicy,
879 typename HalOperation = typename HalPolicy::Operation,
880 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100881bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
882 uint32_t inputIndex,
883 ActivationFn& outActivationFunction,
884 const HalModel& model,
885 const ConversionData& data)
886{
887 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100888 return GetInputActivationFunctionImpl<HalPolicy>(operation,
889 inputIndex,
890 HalPolicy::OperandType::INT32,
891 outActivationFunction,
892 model,
893 data);
arovir01b0717b52018-09-05 17:03:25 +0100894}
895
896
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100897template<typename HalPolicy,
898 typename HalOperation = typename HalPolicy::Operation,
899 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100900bool GetOptionalInputActivation(const HalOperation& operation,
901 uint32_t inputIndex,
902 ActivationFn& activationFunction,
903 const HalModel& model,
904 const ConversionData& data)
905{
906 if (operation.inputs.size() <= inputIndex)
907 {
908 activationFunction = ActivationFn::kActivationNone;
909 }
910 else
911 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100912 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100913 {
914 return Fail("%s: Operation has invalid inputs", __func__);
915 }
916 }
917 return true;
918}
919
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100920template<typename HalPolicy,
921 typename ConvolutionDescriptor,
922 typename HalOperation = typename HalPolicy::Operation,
923 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100924bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
925 uint32_t dilationXIndex,
926 ConvolutionDescriptor& descriptor,
927 const HalModel& model,
928 const ConversionData& data)
929{
930 bool success = true;
931 if (operation.inputs.size() >= dilationXIndex + 2)
932 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933 success &= GetInputScalar<HalPolicy>(operation,
934 dilationXIndex,
935 HalPolicy::OperandType::INT32,
936 descriptor.m_DilationX,
937 model,
938 data);
939 success &= GetInputScalar<HalPolicy>(operation,
940 dilationXIndex + 1,
941 HalPolicy::OperandType::INT32,
942 descriptor.m_DilationY,
943 model,
944 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100945 }
946
947 return success;
948}
949
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100950template<typename HalPolicy,
951 typename HalOperand = typename HalPolicy::Operand,
952 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100953bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100954 std::vector<int32_t>& outValues,
955 const HalModel& model,
956 const ConversionData& data)
957{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100959 {
960 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
961 }
962
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100963 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100964 if (!startAddress)
965 {
966 return Fail("%s: failed to get operand address", __func__, operand.type);
967 }
968
969 // Check number of bytes is sensible
970 const uint32_t numBytes = operand.location.length;
971 if (numBytes % sizeof(int32_t) != 0)
972 {
973 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
974 __func__, numBytes, sizeof(int32_t));
975 }
976
977 outValues.resize(numBytes / sizeof(int32_t));
978 memcpy(outValues.data(), startAddress, numBytes);
979 return true;
980}
981
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100982template<typename HalPolicy,
983 typename HalOperation = typename HalPolicy::Operation,
984 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100985bool GetInputPaddingScheme(const HalOperation& operation,
986 uint32_t inputIndex,
987 PaddingScheme& outPaddingScheme,
988 const HalModel& model,
989 const ConversionData& data)
990{
991 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100992 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100993 {
994 return Fail("%s: failed to get padding scheme input value", __func__);
995 }
996
997 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
998 return true;
999}
1000
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves input operand @p inputIndex of @p operation into a LayerInputHandle
// that can be connected to an ArmNN layer:
//  - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT operands map to the output
//    slot previously recorded in data.m_OutputSlotForOperand;
//  - CONSTANT_COPY / CONSTANT_REFERENCE operands are materialized as a new
//    ArmNN Constant layer.
// On any failure an invalid (default-constructed) LayerInputHandle is returned
// after logging via Fail.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                // Model inputs share the slot-lookup logic of the cases below.
                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the constant layer if some backend can actually host it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break; // unreachable: both branches above return
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1113
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001114template<typename HalPolicy,
1115 typename HalOperation = typename HalPolicy::Operation,
1116 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001117bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1118 uint32_t operationOutputIndex,
1119 armnn::IConnectableLayer& layer,
1120 uint32_t layerOutputIndex,
1121 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001122 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001123{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001124 using HalOperand = typename HalPolicy::Operand;
1125
1126 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001127 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1128 {
1129 return false;
1130 }
1131
1132 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1133
1134 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1135 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1136
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001137 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001138
1139 return true;
1140}
1141
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001142template<typename HalPolicy,
1143 typename HalOperation = typename HalPolicy::Operation,
1144 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001145armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1146 uint32_t inputIndex,
1147 const HalModel& model,
1148 ConversionData& data)
1149{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001150 using HalOperand = typename HalPolicy::Operand;
1151
1152 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001153 if (!operand)
1154 {
1155 return armnn::DataLayout::NHWC;
1156 }
1157
1158 if (!IsBool(*operand))
1159 {
1160 return armnn::DataLayout::NHWC;
1161 }
1162
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001163 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001164 if (!valueAddress)
1165 {
1166 return armnn::DataLayout::NHWC;
1167 }
1168
1169 if (*(static_cast<const bool*>(valueAddress)))
1170 {
1171 return armnn::DataLayout::NCHW;
1172 }
1173 else
1174 {
1175 return armnn::DataLayout::NHWC;
1176 }
1177}
1178
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001179template<typename HalPolicy,
1180 typename HalOperation = typename HalPolicy::Operation,
1181 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001182bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1183 uint32_t outputIndex,
1184 armnn::IConnectableLayer& layer,
1185 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001186 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001187{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001188 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1189 outputIndex,
1190 layer,
1191 outputIndex,
1192 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001193 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001194}
1195
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196template<typename HalPolicy,
1197 typename HalOperation = typename HalPolicy::Operation,
1198 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001199bool ConvertToActivation(const HalOperation& operation,
1200 const char* operationName,
1201 const armnn::ActivationDescriptor& activationDesc,
1202 const HalModel& model,
1203 ConversionData& data)
1204{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001205 using HalOperand = typename HalPolicy::Operand;
1206
1207 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001208 if (!input.IsValid())
1209 {
1210 return Fail("%s: Input 0 is invalid", operationName);
1211 }
1212
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001213 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001214 if (!outputOperand)
1215 {
1216 return false;
1217 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001218
1219 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001220 if (IsDynamicTensor(outInfo))
1221 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001222 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001223 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001224
1225 bool isSupported = false;
1226 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1227 IsActivationSupported,
1228 data.m_Backends,
1229 isSupported,
1230 input.GetTensorInfo(),
1231 outInfo,
1232 activationDesc);
1233 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001234 {
1235 return false;
1236 }
1237
1238 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1239 BOOST_ASSERT(layer != nullptr);
1240 input.Connect(layer->GetInputSlot(0));
1241
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001242 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001243}
1244
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001245template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001246 typename HalOperation = typename HalPolicy::Operation,
1247 typename HalModel = typename HalPolicy::Model>
1248bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1249{
1250 armnn::ActivationDescriptor desc;
1251 desc.m_Function = armnn::ActivationFunction::ReLu;
1252
1253 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1254}
1255
1256template<typename HalPolicy,
1257 typename HalOperation = typename HalPolicy::Operation,
1258 typename HalModel = typename HalPolicy::Model>
1259bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1260{
1261 armnn::ActivationDescriptor desc;
1262 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1263 desc.m_A = 1.0f;
1264 desc.m_B = -1.0f;
1265
1266 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1267}
1268
1269template<typename HalPolicy,
1270 typename HalOperation = typename HalPolicy::Operation,
1271 typename HalModel = typename HalPolicy::Model>
1272bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1273{
1274 armnn::ActivationDescriptor desc;
1275 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1276 desc.m_A = 6.0f;
1277
1278 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1279}
1280
1281template<typename HalPolicy,
1282 typename HalOperation = typename HalPolicy::Operation,
1283 typename HalModel = typename HalPolicy::Model>
1284bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1285{
1286 armnn::ActivationDescriptor desc;
1287 desc.m_Function = armnn::ActivationFunction::TanH;
1288 desc.m_A = 1.0f; // android nn does not support tanH parameters
1289 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1290
1291 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1292}
1293
1294template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001295 typename HalOperation = typename HalPolicy::Operation,
1296 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001297bool ConvertPaddings(const HalOperation& operation,
1298 const HalModel& model,
1299 ConversionData& data,
1300 unsigned int rank,
1301 armnn::PadDescriptor& padDescriptor)
1302{
1303 using HalOperand = typename HalPolicy::Operand;
1304
1305 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1306 if (!paddingsOperand)
1307 {
1308 return Fail("%s: Could not read paddings operand", __func__);
1309 }
1310
1311 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1312 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1313 {
1314 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1315 }
1316
1317 std::vector<int32_t> paddings;
1318 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1319
1320 // add padding for each dimension of input tensor.
1321 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1322 {
1323 int paddingBeforeInput = paddings[i];
1324 int paddingAfterInput = paddings[i + 1];
1325
1326 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1327 {
1328 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1329 }
1330
1331 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1332 }
1333
1334 return true;
1335}
1336
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN pooling operation (AVERAGE_POOL_2D / L2_POOL_2D /
// MAX_POOL_2D, selected by @p poolType) to an ArmNN Pooling2d layer, supporting
// both the explicit-padding (>= 10 inputs) and implicit-padding-scheme
// parameter layouts, plus the optional HAL 1.2 data-layout operand.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Default layout; may be overridden below by the optional layout operand.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 may append an optional data-layout operand at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Implicit padding: derive pad values from the NHWC input dimensions
        // (shape index 2 = width, 1 = height) and the padding scheme.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        // HAL 1.2 may append an optional data-layout operand at index 7.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1454
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001455template<typename HalPolicy,
Mike Kellyb8805202019-07-31 17:25:43 +01001456 typename Operation = typename HalPolicy::Operation,
1457 typename Model = typename HalPolicy::Model>
Mike Kelly46272802019-08-14 17:00:48 +01001458bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
1459{
1460 using Operand = typename HalPolicy::Operand;
1461
1462 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1463 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1464
1465 if (!input0.IsValid() || !input1.IsValid())
1466 {
1467 return Fail("%s: Operation has invalid inputs", __func__);
1468 }
1469
1470 // The FuseActivation parameter is always the input index 2
1471 // and it should be optional
1472 ActivationFn activationFunction;
1473 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1474 {
1475 return Fail("%s: Operation has invalid inputs", __func__);
1476 }
1477
1478 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1479 if (!outputOperand)
1480 {
1481 return false;
1482 }
1483
1484 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1485 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1486
1487 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1488 if (IsDynamicTensor(outputInfo))
1489 {
1490 return Fail("%s: Dynamic output tensors are not supported", __func__);
1491 }
1492
1493 bool isSupported = false;
1494 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1495 IsAdditionSupported,
1496 data.m_Backends,
1497 isSupported,
1498 inputInfo0,
1499 inputInfo1,
1500 outputInfo);
1501 if (!isSupported)
1502 {
1503 return false;
1504 }
1505
1506 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1507 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1508
1509 if (endLayer != nullptr)
1510 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01001511 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1512 if (!isReshapeSupported)
1513 {
1514 return false;
1515 }
1516
Mike Kelly46272802019-08-14 17:00:48 +01001517 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1518 }
1519 else
1520 {
1521 return Fail("%s: ProcessActivation failed", __func__);
1522 }
1523}
1524
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
// Converts an Android NN CONCATENATION operation into an ArmNN Concat layer.
// The first N operation inputs are the tensors to join; the last input is the
// concatenation axis (scalar INT32, may be negative, TensorFlow-style).
// Rank-1/2 inputs are temporarily reshaped to rank 3, and axis permutations
// required by the backends are inserted and undone around the concat.
// Returns true and registers the output slot on success; false otherwise.
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis operand sits at index numInputTensors (i.e. last input).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // Note: outputInfo/outputShape are copies here — they are mutated below
    // (reshape padding, permutation) before being set on the layer.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether low-rank inputs were padded up to rank 3 so the inverse
    // reshape can be applied to the concat output afterwards.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the backend query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    // Ask the configured backends whether this concat configuration is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1759
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer
// (NHWC data layout). Supports both the explicit-padding operand layout
// (10 inputs) and the implicit-padding layout (7 inputs, padding scheme).
// Weights and bias must be constant; dynamic output tensors are rejected.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Re-align the bias quantization scale with input scale * weights scale
    // (small float drift can exist in the model).
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Explicit padding variant: pads, strides and fused activation as scalars.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit padding variant: a padding scheme from which pads are computed.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel H/W taken from dims 1/2 of the weights; input H/W from dims 1/2
        // of the NHWC input (NNAPI filter layout is [depth_out, H, W, depth_in]).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this convolution is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1879
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer (NHWC). Supports the explicit-padding layout
// (11 inputs, incl. depth multiplier at index 9) and the implicit-padding
// layout (8 inputs). The AndroidNN [1, H, W, I*M] weights are reinterpreted
// as [H, W, I, M] and swizzled to ArmNN's [M, I, H, W] layout.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (M is recovered as (I * M) / I, with I taken from the NHWC input channels).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Re-align the bias quantization scale with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // Explicit padding variant (index 9 is the depth multiplier, not read here;
    // it is implied by the weights shape).
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit padding variant: compute pads from the padding scheme.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel W/H from the swizzled [M, I, H, W] weights; input W/H from NHWC input.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this depthwise convolution is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2024
Mike Kelly3c673942019-07-25 09:26:06 +01002025template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002026 typename Operation = typename HalPolicy::Operation,
2027 typename Model = typename HalPolicy::Model>
2028bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002029{
Mike Kelly46272802019-08-14 17:00:48 +01002030 using Operand = typename HalPolicy::Operand;
2031
2032 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2033 if (!input.IsValid())
2034 {
2035 return Fail("%s: Operation has invalid input", __func__);
2036 }
2037
2038 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2039 if (!outputOperand)
2040 {
2041 return Fail("%s: Operation has invalid outputs", __func__);
2042 }
2043
2044 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2045 if (IsDynamicTensor(outputInfo))
2046 {
2047 return Fail("%s: Dynamic output tensors are not supported", __func__);
2048 }
2049
2050 bool isSupported = false;
2051 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2052 IsDequantizeSupported,
2053 data.m_Backends,
2054 isSupported,
2055 input.GetTensorInfo(),
2056 GetTensorInfoForOperand(*outputOperand));
2057 if (!isSupported)
2058 {
2059 return false;
2060 }
2061
2062 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2063 assert(layer != nullptr);
2064 input.Connect(layer->GetInputSlot(0));
2065
2066 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2067}
2068
2069template<typename HalPolicy,
2070 typename Operation = typename HalPolicy::Operation,
2071 typename Model = typename HalPolicy::Model>
2072bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2073{
2074 using Operand = typename HalPolicy::Operand;
2075
2076 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2077 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2078
2079 if (!input0.IsValid() || !input1.IsValid())
2080 {
2081 return Fail("%s: Operation has invalid inputs", __func__);
2082 }
2083
2084 // The FuseActivation parameter is always the input index 2
2085 // and it should be optional
2086 ActivationFn activationFunction;
2087 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2088 {
2089 return Fail("%s: Operation has invalid inputs", __func__);
2090 }
2091
2092 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2093 if (!output)
2094 {
2095 return Fail("%s: Could not read output 0", __func__);
2096 }
2097
2098 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2099 if (IsDynamicTensor(outputInfo))
2100 {
2101 return Fail("%s: Dynamic output tensors are not supported", __func__);
2102 }
2103
2104 bool isSupported = false;
2105 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2106 IsDivisionSupported,
2107 data.m_Backends,
2108 isSupported,
2109 input0.GetTensorInfo(),
2110 input1.GetTensorInfo(),
2111 outputInfo);
2112 if (!isSupported)
2113 {
2114 return false;
2115 }
2116
2117 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2118 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2119
2120 if (endLayer)
2121 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002122 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2123 if (!isReshapeSupported)
2124 {
2125 return false;
2126 }
2127
Mike Kelly46272802019-08-14 17:00:48 +01002128 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2129 }
2130 return Fail("%s: ProcessActivation failed", __func__);
2131}
2132
2133template<typename HalPolicy,
2134 typename Operation = typename HalPolicy::Operation,
2135 typename Model = typename HalPolicy::Model>
2136bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2137{
2138 using Operand = typename HalPolicy::Operand;
2139
2140 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2141 if (!input.IsValid())
2142 {
2143 return Fail("%s: Operation has invalid inputs", __func__);
2144 }
2145
2146 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2147 if (!outputOperand)
2148 {
2149 return Fail("%s: Operation has invalid outputs", __func__);
2150 }
2151
2152 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2153 if (IsDynamicTensor(outputInfo))
2154 {
2155 return Fail("%s: Dynamic output tensors are not supported", __func__);
2156 }
2157
2158 bool isSupported = false;
2159 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2160 IsFloorSupported,
2161 data.m_Backends,
2162 isSupported,
2163 input.GetTensorInfo(),
2164 outputInfo);
2165 if (!isSupported)
2166 {
2167 return false;
2168 }
2169
2170 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2171 assert(layer != nullptr);
2172 input.Connect(layer->GetInputSlot(0));
2173
2174 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2175}
2176
2177template<typename HalPolicy,
2178 typename Operation = typename HalPolicy::Operation,
2179 typename Model = typename HalPolicy::Model>
2180bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
2181{
2182 using Operand = typename HalPolicy::Operand;
2183
2184 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2185 if (!input.IsValid())
2186 {
2187 return Fail("%s: Operation has invalid inputs", __func__);
2188 }
2189
2190 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2191 if (!output)
2192 {
2193 return Fail("%s: Could not read output 0", __func__);
2194 }
2195
2196 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2197 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2198
2199 if (IsDynamicTensor(outputInfo))
2200 {
2201 return Fail("%s: Dynamic output tensors are not supported", __func__);
2202 }
2203
2204 // ArmNN does not currently support non-fixed weights or bias
2205 ConstTensorPin weightsPin =
2206 ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
2207 ConstTensorPin biasPin =
2208 ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
2209
2210 if (!weightsPin.IsValid() || !biasPin.IsValid())
2211 {
2212 return Fail("%s: Operation has invalid inputs", __func__);
2213 }
2214
2215 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2216 armnn::ConstTensor bias = biasPin.GetConstTensor();
2217 armnn::TensorInfo reshapedInfo = inputInfo;
2218
2219 try
2220 {
2221 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
2222 } catch (const std::exception &e) {
2223 return Fail("%s: %s", __func__, e.what());
2224 }
2225
2226 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
2227 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
2228
2229 ActivationFn activationFunction;
2230 if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
2231 {
2232 return Fail("%s: Operation has invalid inputs", __func__);
2233 }
2234
2235 armnn::FullyConnectedDescriptor desc;
2236 desc.m_TransposeWeightMatrix = true;
2237 desc.m_BiasEnabled = true;
2238
2239 bool isSupported = false;
2240 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2241 IsFullyConnectedSupported,
2242 data.m_Backends,
2243 isSupported,
2244 reshapedInfo,
2245 outputInfo,
2246 weights.GetInfo(),
2247 bias.GetInfo(),
2248 desc);
2249 if (!isSupported)
2250 {
2251 return false;
2252 }
2253
2254 armnn::IConnectableLayer* startLayer =
2255 data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2256 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2257
2258 if (endLayer != nullptr)
2259 {
2260 if (inputInfo.GetNumDimensions() > 2U)
2261 {
2262 armnn::ReshapeDescriptor reshapeDescriptor;
2263 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
2264
2265 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2266 assert(reshapeLayer != nullptr);
2267 input.Connect(reshapeLayer->GetInputSlot(0));
2268 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
2269 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
2270 }
2271 else
2272 {
2273 input.Connect(startLayer->GetInputSlot(0));
2274 }
2275
2276 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2277 }
2278 else
2279 {
2280 return Fail("%s: ProcessActivation failed", __func__);
2281 }
2282}
2283
2284template<typename HalPolicy,
2285 typename Operation = typename HalPolicy::Operation,
2286 typename Model = typename HalPolicy::Model>
2287bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2288{
Mike Kelly999e2092019-08-15 10:46:46 +01002289 if (operation.inputs.size() != 1)
2290 {
2291 return Fail("%s: Optional inputs are not supported", __func__);
2292 }
2293
Mike Kelly46272802019-08-14 17:00:48 +01002294 using Operand = typename HalPolicy::Operand;
2295
2296 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2297 if (!input.IsValid())
2298 {
2299 return Fail("%s: Operation has invalid inputs", __func__);
2300 }
2301
2302 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2303 if (!output)
2304 {
2305 return Fail("%s: Could not read output 0", __func__);
2306 }
2307
2308 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2309 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2310
2311 if (IsDynamicTensor(outputInfo))
2312 {
2313 return Fail("%s: Dynamic output tensors are not supported", __func__);
2314 }
2315 if (outputInfo.GetNumDimensions() != 4u)
2316 {
2317 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2318 }
2319
2320 armnn::L2NormalizationDescriptor desc;
2321 desc.m_DataLayout = armnn::DataLayout::NHWC;
2322
2323 bool isSupported = false;
2324 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2325 IsL2NormalizationSupported,
2326 data.m_Backends,
2327 isSupported,
2328 inputInfo,
2329 outputInfo,
2330 desc);
2331 if (!isSupported)
2332 {
2333 return false;
2334 }
2335
2336 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2337 assert(layer != nullptr);
2338 input.Connect(layer->GetInputSlot(0));
2339
2340 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2341}
2342
2343template<typename HalPolicy,
2344 typename Operation = typename HalPolicy::Operation,
2345 typename Model = typename HalPolicy::Model>
2346bool ConvertLocalResponseNormalization(const Operation& operation,
2347 const Model& model,
2348 ConversionData& data)
2349{
Mike Kelly999e2092019-08-15 10:46:46 +01002350 if (operation.inputs.size() != 5)
2351 {
2352 return Fail("%s: Optional inputs are not supported", __func__);
2353 }
2354
Mike Kelly46272802019-08-14 17:00:48 +01002355 using Operand = typename HalPolicy::Operand;
2356 using OperandType = typename HalPolicy::OperandType;
2357
2358 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2359 if (!input.IsValid())
2360 {
2361 return Fail("%s: Operation has invalid inputs", __func__);
2362 }
2363
2364 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2365 if (!output)
2366 {
2367 return Fail("%s: Could not read output 0", __func__);
2368 }
2369
2370 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2371 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2372
2373 if (IsDynamicTensor(outputInfo))
2374 {
2375 return Fail("%s: Dynamic output tensors are not supported", __func__);
2376 }
2377 if (outputInfo.GetNumDimensions() != 4u)
2378 {
2379 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2380 }
2381
2382 armnn::NormalizationDescriptor descriptor;
2383 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2384 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2385 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2386
2387 if (!input.IsValid() ||
2388 !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2389 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2390 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2391 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2392 {
2393 return Fail("%s: Operation has invalid inputs", __func__);
2394 }
2395
2396 // ArmNN expects normSize to be the full size of the normalization
2397 // window rather than the radius as in AndroidNN.
2398 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2399
2400 bool isSupported = false;
2401 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2402 IsNormalizationSupported,
2403 data.m_Backends,
2404 isSupported,
2405 inputInfo,
2406 outputInfo,
2407 descriptor);
2408 if (!isSupported)
2409 {
2410 return false;
2411 }
2412
2413
2414 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2415 assert(layer != nullptr);
2416 input.Connect(layer->GetInputSlot(0));
2417
2418 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2419}
2420
2421template<typename HalPolicy,
2422 typename Operation = typename HalPolicy::Operation,
2423 typename Model = typename HalPolicy::Model>
2424bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2425{
2426 using Operand = typename HalPolicy::Operand;
2427
2428 armnn::ActivationDescriptor desc;
2429 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2430
2431 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2432}
2433
2434template<typename HalPolicy,
2435 typename Operation = typename HalPolicy::Operation,
2436 typename Model = typename HalPolicy::Model>
2437bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
2438{
2439 using Operand = typename HalPolicy::Operand;
2440
2441 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2442 if (!input.IsValid())
2443 {
2444 return Fail("%s: Operation has invalid inputs", __func__);
2445 }
2446
2447 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2448 if (!output)
2449 {
2450 return Fail("%s: Could not read output 0", __func__);
2451 }
2452
2453 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2454 if (IsDynamicTensor(outputInfo))
2455 {
2456 return Fail("%s: Dynamic output tensors are not supported", __func__);
2457 }
2458
2459 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2460 if (!axisOperand)
2461 {
2462 return Fail("%s: Could not read input 1", __func__);
2463 }
2464
2465 std::vector<int32_t> axis;
2466 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
2467 {
2468 return Fail("%s: Input 1 has invalid values", __func__);
2469 }
2470
2471 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2472
2473 // Convert the axis to unsigned int and remove duplicates.
2474 unsigned int rank = inputInfo.GetNumDimensions();
2475 std::set<unsigned int> uniqueAxis;
2476 std::transform(axis.begin(), axis.end(),
2477 std::inserter(uniqueAxis, uniqueAxis.begin()),
2478 [rank](int i) -> unsigned int { return (i + rank) % rank; });
2479
2480 // Get the "keep dims" flag.
2481 int32_t keepDims = 0;
2482 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
2483 {
2484 return Fail("%s: Could not read input 2", __func__);
2485 }
2486
2487 armnn::MeanDescriptor descriptor;
2488 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2489 descriptor.m_KeepDims = keepDims > 0;
2490
2491 bool isSupported = false;
2492 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2493 IsMeanSupported,
2494 data.m_Backends,
2495 isSupported,
2496 inputInfo,
2497 outputInfo,
2498 descriptor);
2499 if (!isSupported)
2500 {
2501 return false;
2502 }
2503
2504 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
2505 assert(layer != nullptr);
2506 input.Connect(layer->GetInputSlot(0));
2507
2508 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2509}
2510
2511template<typename HalPolicy,
2512 typename Operation = typename HalPolicy::Operation,
2513 typename Model = typename HalPolicy::Model>
2514bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2515{
2516 using Operand = typename HalPolicy::Operand;
2517
2518 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2519 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2520
2521 if (!input0.IsValid() || !input1.IsValid())
2522 {
2523 return Fail("%s: Operation has invalid inputs", __func__);
2524 }
2525
2526 // The FuseActivation parameter is always the input index 2
2527 // and it should be optional
2528 ActivationFn activationFunction;
2529 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2530 {
2531 return Fail("%s: Operation has invalid inputs", __func__);
2532 }
2533
2534 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2535
2536 if (outputOperand == nullptr)
2537 {
2538 return false;
2539 }
2540
2541 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2542 if (IsDynamicTensor(outputInfo))
2543 {
2544 return Fail("%s: Dynamic output tensors are not supported", __func__);
2545 }
2546
2547 bool isSupported = false;
2548 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2549 IsMultiplicationSupported,
2550 data.m_Backends,
2551 isSupported,
2552 input0.GetTensorInfo(),
2553 input1.GetTensorInfo(),
2554 outputInfo);
2555 if (!isSupported)
2556 {
2557 return false;
2558 }
2559
2560 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2561 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2562
2563 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2564 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2565
2566 if (endLayer != nullptr)
2567 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002568 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2569 if (!isReshapeSupported)
2570 {
2571 return false;
2572 }
2573
Mike Kelly46272802019-08-14 17:00:48 +01002574 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2575 }
2576 else
2577 {
2578 return Fail("%s: ProcessActivation failed", __func__);
2579 }
2580}
2581
2582template<typename HalPolicy,
2583 typename Operation = typename HalPolicy::Operation,
2584 typename Model = typename HalPolicy::Model>
2585bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
2586{
2587 using Operand = typename HalPolicy::Operand;
2588
Mike Kelly3c673942019-07-25 09:26:06 +01002589 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2590 if (!input.IsValid())
2591 {
2592 return Fail("%s: Operation has invalid inputs", __func__);
2593 }
2594
2595 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2596 unsigned int rank = inputInfo.GetNumDimensions();
2597
2598 armnn::PadDescriptor descriptor;
2599 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2600 {
2601 return Fail("%s: Could not convert paddings", __func__);
2602 }
2603
2604 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2605 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2606 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2607 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2608 {
2609 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2610 }
2611
Mike Kelly46272802019-08-14 17:00:48 +01002612 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002613 if (!output)
2614 {
2615 return Fail("%s: Could not read output", __func__);
2616 }
2617
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002618 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002619 if (IsDynamicTensor(outputInfo))
2620 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002621 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002622 }
2623
2624 bool isSupported = false;
2625 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2626 IsPadSupported,
2627 data.m_Backends,
2628 isSupported,
2629 inputInfo,
2630 outputInfo,
2631 descriptor);
2632 if (!isSupported)
2633 {
2634 return false;
2635 }
2636
2637 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2638 assert(layer != nullptr);
2639 input.Connect(layer->GetInputSlot(0));
2640 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2641
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002642 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002643}
2644
Mike Kelly0a879362019-07-29 16:56:31 +01002645template<typename HalPolicy,
2646 typename Operation = typename HalPolicy::Operation,
Mike Kelly46272802019-08-14 17:00:48 +01002647 typename Model = typename HalPolicy::Model>
2648bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
2649{
2650 using Operand = typename HalPolicy::Operand;
2651
2652 const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
2653 const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2654 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2655
2656 if (inputOperand == nullptr
2657 || requestedShapeOperand == nullptr
2658 || outputOperand == nullptr)
2659 {
2660 return Fail("%s: Operation has invalid inputs", __func__);
2661 }
2662
2663 if (requestedShapeOperand->dimensions.size() != 1)
2664 {
2665 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
2666 __func__, requestedShapeOperand->dimensions.size());
2667 }
2668
2669 std::vector<int32_t> targetDimensions;
2670 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
2671 {
2672 return Fail("%s: Could not read values of input 1", __func__);
2673 }
2674
2675 const Shape inputOperandShape = GetOperandShape(*inputOperand);
2676
2677 Shape requestedShape;
2678 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
2679 // function that resolves these values into a fully specified tensor shape.
2680 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
2681 {
2682 return Fail("%s: Failed to resolve the requested shape", __func__);
2683 }
2684
2685 const Shape outputOperandShape = GetOperandShape(*outputOperand);
2686 if (!SameShape(requestedShape, outputOperandShape))
2687 {
2688 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
2689 }
2690
2691 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2692 if (!input.IsValid())
2693 {
2694 return Fail("%s: Could not read input 0", __func__);
2695 }
2696
2697 armnn::ReshapeDescriptor reshapeDescriptor;
2698 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
2699 requestedShape.dimensions.data());
2700
2701 bool isSupported = false;
2702 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2703 IsReshapeSupported,
2704 data.m_Backends,
2705 isSupported,
2706 input.GetTensorInfo(),
2707 reshapeDescriptor);
2708 if (!isSupported)
2709 {
2710 return false;
2711 }
2712
2713 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2714 assert(layer != nullptr);
2715 input.Connect(layer->GetInputSlot(0));
2716
2717 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2718}
2719
2720template<typename HalPolicy,
2721 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002722 typename Model = typename HalPolicy::Model>
2723bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2724{
Mike Kelly46272802019-08-14 17:00:48 +01002725 using Operand = typename HalPolicy::Operand;
2726
Mike Kelly0a879362019-07-29 16:56:31 +01002727 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2728 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2729
2730 if (!input0.IsValid() || !input1.IsValid())
2731 {
2732 return Fail("%s: Operation has invalid inputs", __func__);
2733 }
2734
2735 // The FuseActivation parameter is always the input index 2
2736 // and it should be optional
2737 ActivationFn activationFunction;
2738 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2739 {
2740 return Fail("%s: Operation has invalid inputs", __func__);
2741 }
2742
2743 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2744 if (!output)
2745 {
2746 return Fail("%s: Could not read output 0", __func__);
2747 }
2748
2749 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2750 if (IsDynamicTensor(outputInfo))
2751 {
2752 return Fail("%s: Dynamic output tensors are not supported", __func__);
2753 }
2754
2755 bool isSupported = false;
2756 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2757 IsSubtractionSupported,
2758 data.m_Backends,
2759 isSupported,
2760 input0.GetTensorInfo(),
2761 input1.GetTensorInfo(),
2762 outputInfo);
2763 if (!isSupported)
2764 {
2765 return false;
2766 }
2767
2768 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2769 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2770
2771 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2772 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2773
2774 if (endLayer)
2775 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002776 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2777 if (!isReshapeSupported)
2778 {
2779 return false;
2780 }
Mike Kelly0a879362019-07-29 16:56:31 +01002781 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2782 }
2783
2784 return Fail("%s: ProcessActivation failed", __func__);
2785}
2786
Finn Williams23b87b32019-07-30 11:44:05 +01002787template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002788 typename Operation = typename HalPolicy::Operation,
2789 typename Model = typename HalPolicy::Model>
2790bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2791{
2792 using Operand = typename HalPolicy::Operand;
2793
2794 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2795 if (!input.IsValid())
2796 {
2797 return Fail("%s: Operation has invalid inputs", __func__);
2798 }
2799
2800 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2801 unsigned int rank = inputInfo.GetNumDimensions();
2802 if (rank > 4)
2803 {
2804 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2805 }
2806
2807 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2808 if (!output)
2809 {
2810 return Fail("%s: Could not read output 0", __func__);
2811 }
2812
2813 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
2814 {
2815 return Fail("%s: Dynamic output tensors are not supported", __func__);
2816 }
2817
2818 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
2819 // if the operand index is out of bounds.
2820 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2821
2822 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2823
2824 std::vector<int32_t> axis;
2825 if (!axisOperand)
2826 {
2827 axis.assign(dimensionSequence,
2828 dimensionSequence + rank);
2829 }
2830 else
2831 {
2832 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
2833 }
2834
2835 std::vector<uint32_t> outputDims;
2836 for (unsigned int i = 0; i < rank; i++)
2837 {
2838 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
2839 auto currentDimension = inputInfo.GetShape()[i];
2840 if (skipSqueeze || currentDimension != 1)
2841 {
2842 outputDims.push_back(currentDimension);
2843 }
2844 }
2845
2846 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
2847
2848 armnn::TensorInfo outputInfo = inputInfo;
2849 outputInfo.SetShape(outShape);
2850
2851 armnn::ReshapeDescriptor reshapeDesc;
2852 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2853
2854 bool isSupported = false;
2855 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2856 IsReshapeSupported,
2857 data.m_Backends,
2858 isSupported,
2859 inputInfo,
2860 reshapeDesc);
2861 if (!isSupported)
2862 {
2863 return false;
2864 }
2865
2866 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
2867 assert(layer != nullptr);
2868 input.Connect(layer->GetInputSlot(0));
2869
2870 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2871}
2872
2873template<typename HalPolicy,
2874 typename Operation = typename HalPolicy::Operation,
2875 typename Model = typename HalPolicy::Model>
2876bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2877{
2878 using Operand = typename HalPolicy::Operand;
2879
2880 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2881 if (!input.IsValid())
2882 {
2883 return Fail("%s: Operation has invalid inputs", __func__);
2884 }
2885
2886 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2887 unsigned int rank = inputInfo.GetNumDimensions();
2888 if (rank > 4)
2889 {
2890 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2891 }
2892
2893 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2894 if (!output)
2895 {
2896 return Fail("%s: Could not read output 0", __func__);
2897 }
2898
2899 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2900 if (IsDynamicTensor(outputInfo))
2901 {
2902 return Fail("%s: Dynamic output tensors are not supported", __func__);
2903 }
2904
2905 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2906 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2907 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
2908
2909 std::vector<int32_t> beginValues;
2910 std::vector<int32_t> endValues;
2911 std::vector<int32_t> stridesValues;
2912
2913 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
2914 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
2915 {
2916 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
2917 {
2918 return false;
2919 }
2920
2921 if (operandValues.size() != rank)
2922 {
2923 return false;
2924 }
2925
2926 return true;
2927 };
2928
2929 if (!ValidateInputOperands(*beginOperand, beginValues)
2930 || !ValidateInputOperands(*endOperand, endValues)
2931 || !ValidateInputOperands(*stridesOperand, stridesValues))
2932 {
2933 return Fail("%s: Operation has invalid input operand", __func__);
2934 }
2935
2936 // Stride cannot have value '0'
2937 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
2938 {
2939 return Fail("%s: Stride must be non-zero value.", __func__);
2940 }
2941
2942 armnn::StridedSliceDescriptor descriptor;
2943 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
2944 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
2945 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
2946 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2947
2948 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
2949 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
2950 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
2951 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
2952 {
2953 return Fail("%s: Operation has invalid inputs", __func__);
2954 }
2955
2956 bool isSupported = false;
2957 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2958 IsStridedSliceSupported,
2959 data.m_Backends,
2960 isSupported,
2961 inputInfo,
2962 outputInfo,
2963 descriptor);
2964 if (!isSupported)
2965 {
2966 return false;
2967 }
2968
2969 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
2970 assert(layer != nullptr);
2971 input.Connect(layer->GetInputSlot(0));
2972
2973 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2974}
2975
2976template<typename HalPolicy,
2977 typename Operation = typename HalPolicy::Operation,
2978 typename Model = typename HalPolicy::Model>
2979bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
2980{
2981 using Operand = typename HalPolicy::Operand;
2982
2983 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2984 if (!input.IsValid())
2985 {
2986 return Fail("%s: Operation has invalid inputs", __func__);
2987 }
2988
2989 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2990 unsigned int rank = inputInfo.GetNumDimensions();
2991 if (rank > 4)
2992 {
2993 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2994 }
2995
2996 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
2997 // if the operand index is out of bounds.
2998 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2999
3000 std::vector<int32_t> perm(rank);
3001 if (!permOperand)
3002 {
3003 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3004 for (unsigned int i = rank; i > 0; i--)
3005 {
3006 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3007 }
3008 }
3009 else
3010 {
3011 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3012 }
3013
3014 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3015
3016 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3017 if (!permutationVector.IsEqual(NHWCToArmNN)
3018 && !permutationVector.IsEqual(ArmNNToNHWC)
3019 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3020 {
3021 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3022 }
3023
3024 armnn::PermuteDescriptor permuteDesc;
3025 permuteDesc.m_DimMappings = permutationVector;
3026
3027 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3028 if (!output)
3029 {
3030 return Fail("%s: Could not read output 0", __func__);
3031 }
3032
3033 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3034
3035 bool isSupported = false;
3036 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3037 IsPermuteSupported,
3038 data.m_Backends,
3039 isSupported,
3040 inputInfo,
3041 outputInfo,
3042 permuteDesc);
3043 if (!isSupported)
3044 {
3045 return false;
3046 }
3047
3048 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3049 assert(layer != nullptr);
3050 input.Connect(layer->GetInputSlot(0));
3051
3052 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3053}
3054
3055template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003056 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003057 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003058 typename HalModel = typename HalPolicy::Model>
3059bool ConvertBatchToSpaceNd(const HalOperation& operation,
3060 const HalModel& model,
3061 ConversionData& data)
3062{
Finn Williams23b87b32019-07-30 11:44:05 +01003063
3064 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3065 if (!input.IsValid())
3066 {
3067 return Fail("%s: Operation has invalid inputs", __func__);
3068 }
3069
3070 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3071 if (!output)
3072 {
3073 return Fail("%s: Could not read output 0", __func__);
3074 }
3075
3076 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3077 if (IsDynamicTensor(outputInfo))
3078 {
3079 return Fail("%s: Dynamic output tensors are not supported", __func__);
3080 }
3081
3082 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3083 if (!blockOperand)
3084 {
3085 return Fail("%s: Could not read input 1", __func__);
3086 }
3087
3088 // Convert the block operand to int32
3089 std::vector<int32_t> block;
3090 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3091 {
3092 return Fail("%s: Input 1 has invalid values", __func__);
3093 }
3094
3095 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3096
3097 unsigned int rank = inputInfo.GetNumDimensions();
3098 if (rank != 4)
3099 {
3100 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3101 }
3102
3103 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3104 {
3105 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3106 " greater than or equal to 1", __func__);
3107 }
3108
3109 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3110 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3111 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3112
3113 if (Is12Operand(*output))
3114 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003115 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003116 }
3117 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3118 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3119
3120 bool isSupported = false;
3121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3122 IsBatchToSpaceNdSupported,
3123 data.m_Backends,
3124 isSupported,
3125 inputInfo,
3126 outputInfo,
3127 batchToSpaceNdDesc);
3128 if (!isSupported)
3129 {
3130 return false;
3131 }
3132
3133 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3134 assert(layer != nullptr);
3135 input.Connect(layer->GetInputSlot(0));
3136
3137 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3138}
Mike Kelly0a879362019-07-29 16:56:31 +01003139
Finn Williamsd74c5052019-07-30 17:06:00 +01003140template<typename HalPolicy,
3141 typename HalOperation = typename HalPolicy::Operation,
3142 typename HalOperand = typename HalPolicy::Operand,
3143 typename HalModel = typename HalPolicy::Model>
3144bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3145{
3146 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3147 if (!input.IsValid())
3148 {
3149 return Fail("%s: Operation has invalid inputs", __func__);
3150 }
3151
3152 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3153 unsigned int rank = inputInfo.GetNumDimensions();
3154 unsigned int spatialDim = rank - 2;
3155
3156 if (rank != 4)
3157 {
3158 Fail("%s: Only inputs with rank 4 are supported", __func__);
3159 }
3160
3161 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3162 if (!output)
3163 {
3164 return Fail("%s: Could not read output 0", __func__);
3165 }
3166
3167 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3168 if (IsDynamicTensor(outputInfo))
3169 {
3170 return Fail("%s: Dynamic output tensors are not supported", __func__);
3171 }
3172
3173 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3174 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3175
3176 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3177 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3178 {
3179 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3180 }
3181
3182 std::vector<int32_t> blockShape;
3183 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3184 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3185 {
3186 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3187 }
3188
3189 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3190 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3191 {
3192 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3193 }
3194
3195 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3196 std::vector<int32_t> paddings;
3197 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3198 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3199 {
3200 int paddingBeforeInput = paddings[i];
3201 int paddingAfterInput = paddings[i + 1];
3202 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3203 {
3204 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3205 }
3206
3207 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3208 }
3209
3210 armnn::SpaceToBatchNdDescriptor descriptor;
3211 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3212 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3213 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3214
3215 if (Is12Operand(*output))
3216 {
3217 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3218 }
3219
3220 bool isSupported = false;
3221 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3222 IsSpaceToBatchNdSupported,
3223 data.m_Backends,
3224 isSupported,
3225 inputInfo,
3226 outputInfo,
3227 descriptor);
3228 if (!isSupported)
3229 {
3230 return false;
3231 }
3232
3233 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3234 assert(layer != nullptr);
3235 input.Connect(layer->GetInputSlot(0));
3236
3237 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3238}
3239
Kevin May407718f2019-09-09 14:46:41 +01003240template<typename HalPolicy,
3241 typename HalOperation = typename HalPolicy::Operation,
3242 typename HalModel = typename HalPolicy::Model>
3243bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3244{
3245 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3246
3247 if (!input.IsValid())
3248 {
3249 return Fail("%s: Operation has invalid input", __func__);
3250 }
3251
3252 using HalOperand = typename HalPolicy::Operand;
3253 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3254 if (!output)
3255 {
3256 return Fail("%s: Could not read output 0", __func__);
3257 }
3258
3259 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3260 if (IsDynamicTensor(outputInfo))
3261 {
3262 return Fail("%s: Dynamic output tensors are not supported", __func__);
3263 }
3264
3265 bool isSupported = false;
3266 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3267 IsAbsSupported,
3268 data.m_Backends,
3269 isSupported,
3270 input.GetTensorInfo(),
3271 outputInfo);
3272
3273 if (!isSupported)
3274 {
3275 return false;
3276 }
3277
3278 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3279 assert(layer != nullptr);
3280 input.Connect(layer->GetInputSlot(0));
3281
3282 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3283}
3284
3285
saoste01b8471482018-10-10 09:44:51 +01003286} // namespace armnn_driver