blob: 8dcf3524c053f7980c5c054d3d0f9571a6cda88e [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Per-model conversion state: the backends to target, the ArmNN network being
// built, the output slot recorded for each operand converted so far, and the
// memory pools backing constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr) // empty INetworkPtr; set once network construction starts
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    // Indexed by operand; slot that produces each operand's tensor (presumably filled during conversion).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Pools referenced by CONSTANT_REFERENCE operands (see GetOperandValueReadOnlyAddress).
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps an armnn::IOutputSlot (and its tensor info) destined to feed a layer input.
// An invalid handle signals that conversion of the corresponding operand failed.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot
    // (implementation not in this file).
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds an armnn::ConstTensor plus (when swizzling was required) the owned storage
// backing it. Move-only, since the ConstTensor may reference m_SwizzledTensorData.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation applied to the data when pinning (identity leaves it in place).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is disabled: the pin may own swizzled storage referenced by m_ConstTensor.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// The format string and arguments are forwarded verbatim to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Sets 'supported' to true as soon as any of the given backends accepts the layer.
// Wrapped in do { } while (false) so the expansion is a single statement: it is now safe
// inside an unbraced if/else, and 'reasonIfUnsupported' no longer leaks into (or clashes
// within) the caller's scope when the macro is used twice in the same block.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
do { \
    std::string reasonIfUnsupported; \
    try { \
        for (auto&& backendId : backends) \
        { \
            auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
            if (layerSupportObject) \
            { \
                supported = \
                    layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
                if (supported) \
                { \
                    break; \
                } \
                else \
                { \
                    if (reasonIfUnsupported.size() > 0) \
                    { \
                        ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                    } \
                    else \
                    { \
                        ALOGD("%s: not supported by armnn", funcName); \
                    } \
                } \
            } \
            else \
            { \
                ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
            } \
        } \
        if (!supported) \
        { \
            ALOGD("%s: not supported by any specified backend", funcName); \
        } \
    } catch (const armnn::InvalidArgumentException &e) { \
        throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
} while (false)
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100165
Mike Kellyb5fdf382019-06-11 16:35:25 +0100166template<typename Operand>
167armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100168{
169 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
170}
171
Matthew Bentham912b3622019-05-03 15:49:14 +0100172inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100173{
Matthew Bentham912b3622019-05-03 15:49:14 +0100174 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
175 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
176 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100177}
178
#ifdef ARMNN_ANDROID_NN_V1_2

// Tells whether a HAL 1.2 operand type can be represented as an ArmNN tensor.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
192
// BOOL does not exist before HAL 1.2, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

/// Checks if an operand is a 1.2 Operand; the V1_0 overload is always false.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
202
#ifdef ARMNN_ANDROID_NN_V1_2

// A V1_2 operand is a bool when its operand type says so.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand (V1_2 overload: always true).
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
217
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100218template<typename LayerHandleType>
219armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
220 armnn::TensorInfo reshapeInfo)
221{
222 armnn::ReshapeDescriptor reshapeDescriptor;
223 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
224
225 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
226 BOOST_ASSERT(reshapeLayer != nullptr);
227
228 // Attach the input layer to the reshape layer
229 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
230 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
231
232 return *reshapeLayer;
233}
234
// Connects two inputs (possibly of different rank) to a binary 'startLayer',
// inserting a reshape that prepends degenerate (size-1) dimensions in front of
// the lower-rank input so both ranks match (NumPy-style broadcast alignment).
// @return false if the backend rejects the required reshape; true otherwise.
bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Right-align the smaller shape and pad the leading dimensions with 1s.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
325
326void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
327 android::nn::PaddingScheme scheme)
328{
329 int32_t padHead;
330 int32_t padTail;
331 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
332 outPadHead = boost::numeric_cast<uint32_t>(padHead);
333 outPadTail = boost::numeric_cast<uint32_t>(padTail);
334}
335
#ifdef ARMNN_ANDROID_NN_V1_2

// Dilation-aware overload used by HAL 1.2 convolutions.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head = 0;
    int32_t tail = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

// Padding for transpose convolution; the NNAPI helper works from the output size
// and may produce signed padding values, so these stay int32_t.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
355
Matthew Bentham912b3622019-05-03 15:49:14 +0100356Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100357{
358 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100359 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100360 shape.dimensions = operand.dimensions;
361 shape.scale = operand.scale;
362 shape.offset = operand.zeroPoint;
363 return shape;
364}
365
#ifdef ARMNN_ANDROID_NN_V1_2

// V1_2 overload of GetOperandShape; identical field mapping.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
379
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Only snap the bias scale to the expected value when it is within 1% of it;
        // larger mismatches are deliberately left untouched.
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
398
// 4D Tensor Permutations (used to convert between NHWC data layout and ArmNN's layout,
// and to rotate the concatenation axis onto one supported by Compute Library subtensors)
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
409
410template<typename OSlot>
411armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
412 const armnn::PermutationVector& mappings)
413{
414 // Add swizzle layer
415 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
416
417 BOOST_ASSERT(layer != nullptr);
418
419 // Connect input to swizzle layer
420 input.Connect(layer->GetInputSlot(0));
421
422 // Setup swizzled output
423 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
424 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
425
426 return *layer;
427}
428
429void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
430{
431 // Add swizzle layer
432 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
433 // Connect swizzled input to layer
434 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
435}
436
437armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
438{
439 // Add deswizzle layer
440 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
441 return deswizzleLayer;
442}
443
// Swizzles 'input' (NHWC -> ArmNN) into 'firstLayer' and returns a deswizzle layer
// attached to 'lastLayer'.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
453
// Single-layer convenience overload: swizzle into and deswizzle out of the same layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
460
461bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
462 const armnn::TensorShape & outputShape,
463 uint32_t concatDim)
464{
465 // Validate the output shape is correct given the input shapes (which have just been validated)
466 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
467 if (outputShape.GetNumDimensions() != numDimensions)
468 {
469 return Fail("%s: Output shape has wrong number of dimensions", __func__);
470 }
471
472 unsigned int outputSizeAlongConcatenatedDimension = 0;
473 for (unsigned int i = 0; i < inputShapes.size(); i++)
474 {
475 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
476 }
477
478 for (unsigned int i = 0; i < numDimensions; ++i)
479 {
480 if (i == concatDim)
481 {
482 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
483 {
484 return Fail(
485 "%s: Invalid output shape for dimension %d (%d != %d)",
486 __func__,
487 i,
488 outputShape[i],
489 outputSizeAlongConcatenatedDimension);
490 }
491 }
492 else
493 {
494 if (outputShape[i] != inputShapes[0][i])
495 {
496 return Fail("%s: Invalid output shape", __func__);
497 }
498 }
499 }
500
501 return true;
502}
503
// Tensors with fewer than 3 dimensions must be reshaped before concatenation handling.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
508
arovir01b0717b52018-09-05 17:03:25 +0100509void SwizzleInputs(armnn::INetwork& network,
510 std::vector<LayerInputHandle>& inputs,
511 std::vector<armnn::TensorShape>& inputShapes,
512 const armnn::PermutationVector& mapping)
513{
514 if (!mapping.IsEqual(IdentityPermutation4D))
515 {
516 size_t nInputs = inputs.size();
517 for (size_t i=0; i<nInputs; ++i)
518 {
519 // add swizzle layer
520 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
521 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
522 auto& outputInfo = outputSlot.GetTensorInfo();
523 // replace inputs with the swizzled ones
524 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
525 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
526 }
527 }
528}
529
Kevin May784a04b2019-12-12 16:33:31 +0000530bool CheckReshapeSupported(ConversionData& data,
531 std::vector<LayerInputHandle>& inputs,
532 std::vector<armnn::TensorShape>& inputShapes,
533 const armnn::PermutationVector& mapping,
534 const armnn::TensorInfo& outputInfo)
535{
536 if (!mapping.IsEqual(IdentityPermutation4D))
537 {
538 size_t nInputs = inputs.size();
539 for (size_t i=0; i<nInputs; ++i)
540 {
541 // check permute layer
542 armnn::PermuteDescriptor permuteDesc;
543 permuteDesc.m_DimMappings = mapping;
544
545 bool isSupported = false;
546 FORWARD_LAYER_SUPPORT_FUNC(__func__,
547 IsPermuteSupported,
548 data.m_Backends,
549 isSupported,
550 inputs[i].GetTensorInfo(),
551 outputInfo,
552 permuteDesc);
553 if (!isSupported)
554 {
555 return false;
556 }
557
558 }
559 SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
560 }
561 return true;
562}
563
564
narpra01f176d5a2018-11-18 20:17:48 +0000565bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
566 int32_t & concatDimension,
567 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100568{
narpra01f176d5a2018-11-18 20:17:48 +0000569 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100570 BOOST_ASSERT(numberOfDimensions >= 3);
571
572 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000573 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
574 // or along dimension 0 or 2 for a 3-D tensor.
575 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100576 {
narpra01f176d5a2018-11-18 20:17:48 +0000577 concatDimension = 1;
578 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
579 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100580 }
narpra01f176d5a2018-11-18 20:17:48 +0000581 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100582 {
narpra01f176d5a2018-11-18 20:17:48 +0000583 concatDimension = 0;
584 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
585 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100586 }
narpra01f176d5a2018-11-18 20:17:48 +0000587 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100588}
589
590} // anonymous namespace
591
592namespace armnn_driver
593{
594
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
605
606} // namespace armnn_driver
607
608///
609/// Utility templates
610///
611
612namespace armnn_driver
613{
614
615using namespace android::nn;
616
/// Looks up the operand feeding input 'inputIndex' of 'operation' in 'model'.
/// @return a pointer into model.operands, or nullptr when the index is out of range.
///         With failOnIndexOutOfBounds == false the out-of-range case is silent
///         (useful for optional trailing inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
638
/// Looks up the operand produced as output 'outputIndex' of 'operation' in 'model'.
/// @return a pointer into model.operands, or nullptr (with a logged failure) when
///         the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
658
/// Returns a read-only pointer to the data of a constant operand, or nullptr.
/// @param optional when true, a NO_VALUE operand yields nullptr without logging
///        a failure (used for optional inputs such as omitted bias tensors).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A missing mandatory value is handled as an error by the default branch below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
705
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100706template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100707 typename HalOperation = typename HalPolicy::Operation,
708 typename HalModel = typename HalPolicy::Model,
709 typename HalOperandType = typename HalPolicy::OperandType>
710bool GetOperandType(const HalOperation& operation,
711 uint32_t inputIndex,
712 const HalModel& model,
713 HalOperandType& type)
714{
715 using HalOperand = typename HalPolicy::Operand;
716
717 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
718 if (!operand)
719 {
720 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
721 }
722
723 type = operand->type;
724 return true;
725}
726
/// Pins a constant operand's data as a ConstTensorPin.
/// @param dimensionMappings permutation applied to the data when pinning.
/// @param overrideTensorShape when non-null, replaces the operand's own shape.
/// @param optional when true, a missing value produces an *optional* invalid pin
///        rather than an error.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Only constant (or, when optional, absent) operands can be pinned.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
774
775template<typename HalPolicy,
776 typename HalOperation = typename HalPolicy::Operation,
777 typename HalModel = typename HalPolicy::Model>
778ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
779 uint32_t inputIndex,
780 const HalModel& model,
781 const ConversionData& data,
782 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
783 const armnn::TensorShape* overrideTensorShape = nullptr,
784 bool optional = false)
785{
786 using HalOperand = typename HalPolicy::Operand;
787
788 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
789 if (!operand)
790 {
791 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
792 return ConstTensorPin();
793 }
794 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
795 model,
796 data,
797 dimensionMappings,
798 overrideTensorShape,
799 optional);
800}
801
/// Reads a scalar input (e.g. an INT32 or FLOAT32 parameter) from the operation.
/// Fails if the operand is missing, has a type other than 'type', or its stored
/// payload is not exactly sizeof(OutputType) bytes.
template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}
843
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100844template<typename HalPolicy,
845 typename HalOperation = typename HalPolicy::Operation,
846 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100847bool GetInputInt32(const HalOperation& operation,
848 uint32_t inputIndex,
849 int32_t& outValue,
850 const HalModel& model,
851 const ConversionData& data)
852{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100853 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100854}
855
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100856template<typename HalPolicy,
857 typename HalOperation = typename HalPolicy::Operation,
858 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100859bool GetInputFloat32(const HalOperation& operation,
860 uint32_t inputIndex,
861 float& outValue,
862 const HalModel& model,
863 const ConversionData& data)
864{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100865 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100866}
867
// Reads a fused-activation operand into 'outActivationFunction'.
// 'type' selects how the operand is encoded: either a scalar INT32 or a
// TENSOR_INT32 (callers using the tensor form pass a 1-element tensor, whose
// payload is still sizeof(int32_t) bytes, so GetInputScalar's length check holds).
// Returns false (via Fail) for any other operand type or on a failed read.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        // NOTE(review): the message below stringifies the non-HAL 'OperandType'
        // enumerators rather than 'HalOperandType' — presumably equivalent for
        // these values across HAL versions; confirm if a mismatch is ever seen.
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    // The raw value is an int32 enum code; cast it into the driver's ActivationFn.
    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
896
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100897template<typename HalPolicy,
898 typename HalOperation = typename HalPolicy::Operation,
899 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100900bool GetInputActivationFunction(const HalOperation& operation,
901 uint32_t inputIndex,
902 ActivationFn& outActivationFunction,
903 const HalModel& model,
904 const ConversionData& data)
905{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100906 return GetInputActivationFunctionImpl<HalPolicy>(operation,
907 inputIndex,
908 HalPolicy::OperandType::INT32,
909 outActivationFunction,
910 model,
911 data);
arovir01b0717b52018-09-05 17:03:25 +0100912}
913
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100914template<typename HalPolicy,
915 typename HalOperation = typename HalPolicy::Operation,
916 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100917bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
918 uint32_t inputIndex,
919 ActivationFn& outActivationFunction,
920 const HalModel& model,
921 const ConversionData& data)
922{
923 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100924 return GetInputActivationFunctionImpl<HalPolicy>(operation,
925 inputIndex,
926 HalPolicy::OperandType::INT32,
927 outActivationFunction,
928 model,
929 data);
arovir01b0717b52018-09-05 17:03:25 +0100930}
931
932
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100933template<typename HalPolicy,
934 typename HalOperation = typename HalPolicy::Operation,
935 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100936bool GetOptionalInputActivation(const HalOperation& operation,
937 uint32_t inputIndex,
938 ActivationFn& activationFunction,
939 const HalModel& model,
940 const ConversionData& data)
941{
942 if (operation.inputs.size() <= inputIndex)
943 {
944 activationFunction = ActivationFn::kActivationNone;
945 }
946 else
947 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100948 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100949 {
950 return Fail("%s: Operation has invalid inputs", __func__);
951 }
952 }
953 return true;
954}
955
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100956template<typename HalPolicy,
957 typename ConvolutionDescriptor,
958 typename HalOperation = typename HalPolicy::Operation,
959 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100960bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
961 uint32_t dilationXIndex,
962 ConvolutionDescriptor& descriptor,
963 const HalModel& model,
964 const ConversionData& data)
965{
966 bool success = true;
967 if (operation.inputs.size() >= dilationXIndex + 2)
968 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100969 success &= GetInputScalar<HalPolicy>(operation,
970 dilationXIndex,
971 HalPolicy::OperandType::INT32,
972 descriptor.m_DilationX,
973 model,
974 data);
975 success &= GetInputScalar<HalPolicy>(operation,
976 dilationXIndex + 1,
977 HalPolicy::OperandType::INT32,
978 descriptor.m_DilationY,
979 model,
980 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100981 }
982
983 return success;
984}
985
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100986template<typename HalPolicy,
987 typename HalOperand = typename HalPolicy::Operand,
988 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100989bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100990 std::vector<int32_t>& outValues,
991 const HalModel& model,
992 const ConversionData& data)
993{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100994 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100995 {
996 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
997 }
998
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100999 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001000 if (!startAddress)
1001 {
1002 return Fail("%s: failed to get operand address", __func__, operand.type);
1003 }
1004
1005 // Check number of bytes is sensible
1006 const uint32_t numBytes = operand.location.length;
1007 if (numBytes % sizeof(int32_t) != 0)
1008 {
1009 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1010 __func__, numBytes, sizeof(int32_t));
1011 }
1012
1013 outValues.resize(numBytes / sizeof(int32_t));
1014 memcpy(outValues.data(), startAddress, numBytes);
1015 return true;
1016}
1017
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001018template<typename HalPolicy,
1019 typename HalOperation = typename HalPolicy::Operation,
1020 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001021bool GetInputPaddingScheme(const HalOperation& operation,
1022 uint32_t inputIndex,
1023 PaddingScheme& outPaddingScheme,
1024 const HalModel& model,
1025 const ConversionData& data)
1026{
1027 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001028 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001029 {
1030 return Fail("%s: failed to get padding scheme input value", __func__);
1031 }
1032
1033 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1034 return true;
1035}
1036
// Resolves an operation input operand into a LayerInputHandle that later
// conversion code can connect to an ArmNN layer's input slot.
//
// Dispatches on the operand's lifetime:
//  - MODEL_INPUT: validated against backend support, then handled like an
//    internal tensor (intentional fallthrough);
//  - TEMPORARY_VARIABLE / MODEL_OUTPUT: bound to the output slot recorded for
//    the producing layer in data.m_OutputSlotForOperand;
//  - CONSTANT_COPY / CONSTANT_REFERENCE: materialised as an ArmNN Constant layer.
// Any failure returns a default-constructed (invalid) LayerInputHandle after
// logging via Fail.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only emit the Constant layer when some backend supports it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1149
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001150template<typename HalPolicy,
1151 typename HalOperation = typename HalPolicy::Operation,
1152 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001153bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1154 uint32_t operationOutputIndex,
1155 armnn::IConnectableLayer& layer,
1156 uint32_t layerOutputIndex,
1157 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001158 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001159{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001160 using HalOperand = typename HalPolicy::Operand;
1161
1162 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001163 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1164 {
1165 return false;
1166 }
1167
1168 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1169
1170 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1171 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1172
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001173 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001174
1175 return true;
1176}
1177
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001178template<typename HalPolicy,
1179 typename HalOperation = typename HalPolicy::Operation,
1180 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001181armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1182 uint32_t inputIndex,
1183 const HalModel& model,
1184 ConversionData& data)
1185{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001186 using HalOperand = typename HalPolicy::Operand;
1187
1188 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001189 if (!operand)
1190 {
1191 return armnn::DataLayout::NHWC;
1192 }
1193
1194 if (!IsBool(*operand))
1195 {
1196 return armnn::DataLayout::NHWC;
1197 }
1198
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001199 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001200 if (!valueAddress)
1201 {
1202 return armnn::DataLayout::NHWC;
1203 }
1204
1205 if (*(static_cast<const bool*>(valueAddress)))
1206 {
1207 return armnn::DataLayout::NCHW;
1208 }
1209 else
1210 {
1211 return armnn::DataLayout::NHWC;
1212 }
1213}
1214
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001215template<typename HalPolicy,
1216 typename HalOperation = typename HalPolicy::Operation,
1217 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001218bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1219 uint32_t outputIndex,
1220 armnn::IConnectableLayer& layer,
1221 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001222 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001223{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001224 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1225 outputIndex,
1226 layer,
1227 outputIndex,
1228 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001229 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001230}
1231
// Shared implementation for all activation conversions (ReLu, ReLu1, ReLu6,
// TanH, ...): validates input/output operands, checks backend support for the
// given descriptor, adds an ActivationLayer and wires it up.
// 'operationName' is only used for error reporting.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Reject early if no provided backend can run this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Register the layer's output slot for the operation's output operand.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1280
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001281template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001282 typename HalOperation = typename HalPolicy::Operation,
1283 typename HalModel = typename HalPolicy::Model>
1284bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1285{
1286 armnn::ActivationDescriptor desc;
1287 desc.m_Function = armnn::ActivationFunction::ReLu;
1288
1289 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1290}
1291
1292template<typename HalPolicy,
1293 typename HalOperation = typename HalPolicy::Operation,
1294 typename HalModel = typename HalPolicy::Model>
1295bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1296{
1297 armnn::ActivationDescriptor desc;
1298 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1299 desc.m_A = 1.0f;
1300 desc.m_B = -1.0f;
1301
1302 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1303}
1304
1305template<typename HalPolicy,
1306 typename HalOperation = typename HalPolicy::Operation,
1307 typename HalModel = typename HalPolicy::Model>
1308bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1309{
1310 armnn::ActivationDescriptor desc;
1311 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1312 desc.m_A = 6.0f;
1313
1314 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1315}
1316
1317template<typename HalPolicy,
1318 typename HalOperation = typename HalPolicy::Operation,
1319 typename HalModel = typename HalPolicy::Model>
1320bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1321{
1322 armnn::ActivationDescriptor desc;
1323 desc.m_Function = armnn::ActivationFunction::TanH;
1324 desc.m_A = 1.0f; // android nn does not support tanH parameters
1325 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1326
1327 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1328}
1329
1330template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001331 typename HalOperation = typename HalPolicy::Operation,
1332 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001333bool ConvertPaddings(const HalOperation& operation,
1334 const HalModel& model,
1335 ConversionData& data,
1336 unsigned int rank,
1337 armnn::PadDescriptor& padDescriptor)
1338{
1339 using HalOperand = typename HalPolicy::Operand;
1340
1341 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1342 if (!paddingsOperand)
1343 {
1344 return Fail("%s: Could not read paddings operand", __func__);
1345 }
1346
1347 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1348 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1349 {
1350 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1351 }
1352
1353 std::vector<int32_t> paddings;
1354 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1355
1356 // add padding for each dimension of input tensor.
1357 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1358 {
1359 int paddingBeforeInput = paddings[i];
1360 int paddingAfterInput = paddings[i + 1];
1361
1362 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1363 {
1364 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1365 }
1366
1367 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1368 }
1369
1370 return true;
1371}
1372
// Converts an NNAPI 2D pooling operation (AVG/MAX/L2, selected by 'poolType')
// into an ArmNN Pooling2d layer followed by any fused activation.
// Two HAL input layouts are supported:
//  - explicit form (>= 10 inputs): pad l/r/t/b, strides, pool size, activation,
//    and an optional data-layout flag at index 10 (HAL 1.2 operands only);
//  - implicit form: a padding scheme plus strides/pool size/activation, with an
//    optional data-layout flag at index 7; padding is derived via CalcPadding.
// 'operationName' is only used in error messages.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC; // default; may be overridden below

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // The data-layout operand only exists for HAL 1.2 operands.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Implicit padding: derive pad values from the scheme and the input's
        // spatial dimensions (resolved through the chosen data layout).
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1491
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001492template<typename HalPolicy,
Mike Kellyb8805202019-07-31 17:25:43 +01001493 typename Operation = typename HalPolicy::Operation,
1494 typename Model = typename HalPolicy::Model>
Mike Kelly46272802019-08-14 17:00:48 +01001495bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
1496{
1497 using Operand = typename HalPolicy::Operand;
1498
1499 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1500 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1501
1502 if (!input0.IsValid() || !input1.IsValid())
1503 {
1504 return Fail("%s: Operation has invalid inputs", __func__);
1505 }
1506
1507 // The FuseActivation parameter is always the input index 2
1508 // and it should be optional
1509 ActivationFn activationFunction;
1510 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1511 {
1512 return Fail("%s: Operation has invalid inputs", __func__);
1513 }
1514
1515 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1516 if (!outputOperand)
1517 {
1518 return false;
1519 }
1520
1521 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1522 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1523
1524 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1525 if (IsDynamicTensor(outputInfo))
1526 {
1527 return Fail("%s: Dynamic output tensors are not supported", __func__);
1528 }
1529
1530 bool isSupported = false;
1531 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1532 IsAdditionSupported,
1533 data.m_Backends,
1534 isSupported,
1535 inputInfo0,
1536 inputInfo1,
1537 outputInfo);
1538 if (!isSupported)
1539 {
1540 return false;
1541 }
1542
1543 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1544 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1545
1546 if (endLayer != nullptr)
1547 {
Derek Lamberti57ea6d12019-12-19 15:45:35 +00001548 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001549 if (!isReshapeSupported)
1550 {
1551 return false;
1552 }
1553
Mike Kelly46272802019-08-14 17:00:48 +01001554 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1555 }
1556 else
1557 {
1558 return Fail("%s: ProcessActivation failed", __func__);
1559 }
1560}
1561
1562template<typename HalPolicy,
1563 typename Operation = typename HalPolicy::Operation,
1564 typename Model = typename HalPolicy::Model>
Mike Kellyb8805202019-07-31 17:25:43 +01001565bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
1566{
1567 using HalOperand = typename HalPolicy::Operand;
1568 using HalOperandType = typename HalPolicy::OperandType;
1569
1570 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1571 if (operation.inputs.size() <= 1)
1572 {
1573 return Fail("%s: Operation has insufficient arguments", __func__);
1574 }
1575
1576 // Get inputs and outputs
1577 const std::size_t numInputTensors = operation.inputs.size() - 1;
1578
1579 int32_t concatDim;
1580 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1581 {
1582 return Fail("%s: Operation has invalid inputs", __func__);
1583 }
1584
1585 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1586 if (!outputOperand)
1587 {
1588 return Fail("%s: Operation has no outputs", __func__);
1589 }
1590
1591
1592 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
1593 armnn::TensorShape outputShape = outputInfo.GetShape();
1594
1595 //
1596 // handle negative concat dims along the lines of tensorflow as described here:
1597 // https://www.tensorflow.org/api_docs/python/tf/concat
1598 // "negative axis refers to axis + rank(values)-th dimension"
1599 //
1600 if (concatDim < 0)
1601 {
1602 concatDim += outputShape.GetNumDimensions();
1603 }
1604
1605 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
1606 {
1607 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
1608 }
1609
1610 std::vector<LayerInputHandle> inputHandles;
1611 std::vector<armnn::TensorShape> inputShapes;
1612
1613 inputHandles.reserve(numInputTensors);
1614 inputShapes.reserve(numInputTensors);
1615
1616 bool inputsHaveBeenReshaped = false;
1617 unsigned int tensorDimensionsAdded = 0;
1618
1619 for (uint32_t i = 0; i < numInputTensors; ++i)
1620 {
1621 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
1622 if (!operand)
1623 {
1624 return Fail("%s: Operation has invalid inputs", __func__);
1625 }
1626
Mike Kellyc7d0d442019-12-11 19:27:11 +00001627 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
1628 if (!operandInputHandle.IsValid())
1629 {
1630 return Fail("%s: Operation has invalid inputs", __func__);
1631 }
Mike Kellyb8805202019-07-31 17:25:43 +01001632
Mike Kellyc7d0d442019-12-11 19:27:11 +00001633 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01001634 if (operandShape.GetNumDimensions() == 0)
1635 {
1636 return Fail("%s: Operands with rank 0 are not supported", __func__);
1637 }
1638
1639 if (RequiresReshape(operandShape))
1640 {
1641 inputsHaveBeenReshaped = true;
1642
1643 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
1644
1645 // Expand the tensor to three dimensions
1646 if (operandShape.GetNumDimensions() == 2)
1647 {
1648 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
1649 tensorDimensionsAdded = 1;
1650 }
1651 else
1652 {
1653 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
1654 tensorDimensionsAdded = 2;
1655 }
1656
Kevin May784a04b2019-12-12 16:33:31 +00001657 armnn::ReshapeDescriptor reshapeDescriptor;
1658 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
1659
1660 bool isSupported = false;
1661 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1662 IsReshapeSupported,
1663 data.m_Backends,
1664 isSupported,
1665 operandInputHandle.GetTensorInfo(),
1666 reshapeInfo,
1667 reshapeDescriptor);
1668 if (!isSupported)
1669 {
1670 return false;
1671 }
1672
Mike Kellyb8805202019-07-31 17:25:43 +01001673 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
1674 *data.m_Network,
1675 operandInputHandle,
1676 reshapeInfo
1677 );
1678
1679 // Point to the reshape operation rather then the input operation
1680 operandShape = reshapeInfo.GetShape();
1681 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
1682 }
1683
1684 inputShapes.emplace_back(operandShape);
1685 inputHandles.emplace_back(operandInputHandle);
1686
1687 if (!inputHandles.back().IsValid())
1688 {
1689 return Fail("%s: Operation has invalid inputs", __func__);
1690 }
1691 }
1692
1693 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
1694
1695 if (inputsHaveBeenReshaped)
1696 {
1697 // Adjust the concatenation dimension by the amount of dimensions added (if any)
1698 concatDim += tensorDimensionsAdded;
1699
1700 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
1701 if (tensorDimensionsAdded == 1)
1702 {
1703 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
1704 }
1705 else if (tensorDimensionsAdded == 2)
1706 {
1707 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
1708 }
1709 }
1710
1711 // Check if permutations is required and get the pair of permutations required for the concatenation.
1712 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
1713 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
1714 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
1715
1716 bool needPermute =
1717 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
1718
1719 if (needPermute)
1720 {
1721 outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
1722 }
1723
1724 outputInfo.SetShape(outputShape);
1725
1726 // this is no-op for identity swizzles, otherwise it replaces both
1727 // the handles and shapes with the swizzled layer output handles and shapes
Kevin May784a04b2019-12-12 16:33:31 +00001728 if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
1729 {
1730 return false;
1731 }
Mike Kellyb8805202019-07-31 17:25:43 +01001732
1733 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
1734 armnn::OriginsDescriptor concatDescriptor;
1735
1736 try
1737 {
1738 // The concat descriptor is always created across the only supported concat dimension
1739 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
1740 concatDescriptor =
1741 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
1742 }
Mike Kellyc7d0d442019-12-11 19:27:11 +00001743 catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01001744 {
1745 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
1746 }
1747
1748 // Validate the output shape is correct given the input shapes based on the
1749 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
1750 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
1751 {
1752 return Fail("%s: Error validating the output shape for concat", __func__);
1753 }
1754
1755 std::vector<const armnn::TensorInfo*> inputTensorInfos;
1756 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
1757 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
1758
1759 bool isSupported = false;
1760 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1761 IsConcatSupported,
1762 data.m_Backends,
1763 isSupported,
1764 inputTensorInfos,
1765 outputInfo,
1766 concatDescriptor);
1767 if (!isSupported)
1768 {
1769 return false;
1770 }
1771
1772 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
1773 assert(layer != nullptr);
1774 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1775
1776 // Connect inputs to the layer
1777 const int numInputSlots = layer->GetNumInputSlots();
1778 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
1779 for (int i = 0; i < numInputSlots; ++i)
1780 {
1781 // connect the input directly to the merge (concat) layer
1782 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
1783 }
1784
1785 if (needPermute)
1786 {
Kevin May784a04b2019-12-12 16:33:31 +00001787 armnn::PermuteDescriptor permuteDesc;
1788 permuteDesc.m_DimMappings = permutationPair.second;
1789
1790 bool isSupported = false;
1791 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1792 IsPermuteSupported,
1793 data.m_Backends,
1794 isSupported,
1795 layer->GetOutputSlot(0).GetTensorInfo(),
1796 outputInfo,
1797 permuteDesc);
1798 if (!isSupported)
1799 {
1800 return false;
1801 }
Mike Kellyb8805202019-07-31 17:25:43 +01001802 // Add permutation layer and connect the output to it, the permutation becomes the output layer
1803 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
1804 layer->GetOutputSlot(0),
1805 permutationPair.second);
1806 layer = &deswizzleLayer;
1807 }
1808
1809 if (inputsHaveBeenReshaped)
1810 {
1811 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
1812
1813 // Undo the reshape knowing the amount of dimensions added
1814 if (tensorDimensionsAdded == 1)
1815 {
1816 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
1817 afterConcatInfo.GetShape()[2] }));
1818 }
1819 else if (tensorDimensionsAdded == 2)
1820 {
1821 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
1822 }
1823
Kevin May784a04b2019-12-12 16:33:31 +00001824 armnn::ReshapeDescriptor reshapeDescriptor;
1825 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
1826
1827 bool isSupported = false;
1828 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1829 IsReshapeSupported,
1830 data.m_Backends,
1831 isSupported,
1832 layer->GetOutputSlot(0).GetTensorInfo(),
1833 afterConcatInfo,
1834 reshapeDescriptor);
1835 if (!isSupported)
1836 {
1837 return false;
1838 }
1839
Mike Kellyb8805202019-07-31 17:25:43 +01001840 layer = &AddReshapeLayer(
1841 *data.m_Network,
1842 layer->GetOutputSlot(0),
1843 afterConcatInfo
1844 );
1845 }
1846
1847 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1848}
1849
/// Converts an NNAPI CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Two operand layouts are accepted (see the NNAPI CONV_2D spec):
///  - 10 inputs: explicit padding (left/right/top/bottom), strides and activation;
///  - 7 inputs:  an implicit padding scheme plus strides and activation, in which
///    case the per-edge padding is derived via CalcPadding.
/// Weights and bias must be constant tensors; dynamic outputs are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with weights * input scales where they drift slightly.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 10 inputs: explicit per-edge padding supplied as scalars at indices 3-6.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 7 inputs: implicit padding scheme at index 3; padding is computed from the
    // input/kernel extents and strides.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel W/H taken from weight dims 2/1, input W/H from dims 2/1 of the NHWC input.
        // (Weights appear to be laid out [ O, H, W, I ] here -- TODO confirm against HAL docs.)
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Query the backends before committing any layers to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1969
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001970template<typename HalPolicy,
1971 typename HalOperation = typename HalPolicy::Operation,
1972 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001973bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
1974{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001975 using HalOperand = typename HalPolicy::Operand;
1976 using HalOperandType = typename HalPolicy::OperandType;
1977
1978 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001979
1980 if (!input.IsValid())
1981 {
1982 return Fail("%s: Operation has invalid inputs", __func__);
1983 }
1984
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001985 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001986
1987 if (!output)
1988 {
1989 return Fail("%s: Could not read output 0", __func__);
1990 }
1991
1992 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001993 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001994
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01001995 if (IsDynamicTensor(outputInfo))
1996 {
1997 return Fail("%s: Dynamic output tensors are not supported", __func__);
1998 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01001999
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002000 // ArmNN does not currently support non-fixed weights or bias
Mike Kellyb5fdf382019-06-11 16:35:25 +01002001 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002002 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002003
2004 if (weightsOperand == nullptr)
2005 {
2006 return Fail("%s: Operand is invalid", __func__);
2007 }
2008 armnn::DepthwiseConvolution2dDescriptor desc;
2009 desc.m_DataLayout = armnn::DataLayout::NHWC;
2010
Mike Kellyb5fdf382019-06-11 16:35:25 +01002011 // Reinterpret weight data as [ H, W, I, M ]
2012 armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
2013 weightsOperand->dimensions[2],
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002014 inputInfo.GetShape()[3],
2015 weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
Mike Kellyb5fdf382019-06-11 16:35:25 +01002016
2017 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
2018 const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
2019
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002020 const ConstTensorPin weightsPin =
2021 ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
2022 1,
2023 model,
2024 data,
2025 HWIMToMIHW,
2026 &weightsShape);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002027
2028 // Bias is a 1D tensor
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002029 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01002030
2031 if (!weightsPin.IsValid() || !biasPin.IsValid())
2032 {
2033 return Fail("%s: Operation has invalid inputs", __func__);
2034 }
2035
2036 armnn::ConstTensor weights = weightsPin.GetConstTensor();
2037 armnn::ConstTensor bias = biasPin.GetConstTensor();
2038 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2039
2040 ActivationFn activation;
2041
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002042 if (operation.inputs.size() == 11)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002043 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002044 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
2045 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
2046 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
2047 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
2048 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2049 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002050 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002051 {
2052 return Fail("%s: Operation has invalid inputs", __func__);
2053 }
2054 }
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002055 else if (operation.inputs.size() == 8)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002056 {
2057 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002058 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
2059 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
2060 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002061 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01002062 {
2063 return Fail("%s: Operation has invalid inputs", __func__);
2064 }
2065
2066 const uint32_t kernelX = weights.GetShape()[3];
2067 const uint32_t kernelY = weights.GetShape()[2];
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01002068 const uint32_t inputX = inputInfo.GetShape()[2];
2069 const uint32_t inputY = inputInfo.GetShape()[1];
Mike Kellyb5fdf382019-06-11 16:35:25 +01002070
2071 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2072 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2073 }
2074 else
2075 {
2076 return Fail("%s: Unsupported number of operation inputs", __func__);
2077 }
2078
2079 desc.m_BiasEnabled = true;
2080 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2081
Ferran Balaguerd30093c2019-07-09 17:04:47 +01002082 bool isSupported = false;
2083 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2084 IsDepthwiseConvolutionSupported,
2085 data.m_Backends,
2086 isSupported,
2087 inputInfo,
2088 outputInfo,
2089 desc,
2090 weights.GetInfo(),
2091 biases);
2092 if (!isSupported)
Mike Kellyb5fdf382019-06-11 16:35:25 +01002093 {
2094 return false;
2095 }
2096
2097 armnn::IConnectableLayer* startLayer =
2098 data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2099 if (!startLayer)
2100 {
2101 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
2102 }
2103
2104 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
2105 if (!endLayer)
2106 {
2107 return Fail("%s: ProcessActivation failed", __func__);
2108 }
2109
2110 input.Connect(startLayer->GetInputSlot(0));
2111
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002112 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01002113}
2114
Mike Kelly3c673942019-07-25 09:26:06 +01002115template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002116 typename Operation = typename HalPolicy::Operation,
2117 typename Model = typename HalPolicy::Model>
2118bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002119{
Mike Kelly46272802019-08-14 17:00:48 +01002120 using Operand = typename HalPolicy::Operand;
2121
2122 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2123 if (!input.IsValid())
2124 {
2125 return Fail("%s: Operation has invalid input", __func__);
2126 }
2127
2128 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2129 if (!outputOperand)
2130 {
2131 return Fail("%s: Operation has invalid outputs", __func__);
2132 }
2133
2134 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2135 if (IsDynamicTensor(outputInfo))
2136 {
2137 return Fail("%s: Dynamic output tensors are not supported", __func__);
2138 }
2139
2140 bool isSupported = false;
2141 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2142 IsDequantizeSupported,
2143 data.m_Backends,
2144 isSupported,
2145 input.GetTensorInfo(),
2146 GetTensorInfoForOperand(*outputOperand));
2147 if (!isSupported)
2148 {
2149 return false;
2150 }
2151
2152 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2153 assert(layer != nullptr);
2154 input.Connect(layer->GetInputSlot(0));
2155
2156 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2157}
2158
2159template<typename HalPolicy,
2160 typename Operation = typename HalPolicy::Operation,
2161 typename Model = typename HalPolicy::Model>
2162bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2163{
2164 using Operand = typename HalPolicy::Operand;
2165
2166 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2167 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2168
2169 if (!input0.IsValid() || !input1.IsValid())
2170 {
2171 return Fail("%s: Operation has invalid inputs", __func__);
2172 }
2173
2174 // The FuseActivation parameter is always the input index 2
2175 // and it should be optional
2176 ActivationFn activationFunction;
2177 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2178 {
2179 return Fail("%s: Operation has invalid inputs", __func__);
2180 }
2181
2182 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2183 if (!output)
2184 {
2185 return Fail("%s: Could not read output 0", __func__);
2186 }
2187
2188 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2189 if (IsDynamicTensor(outputInfo))
2190 {
2191 return Fail("%s: Dynamic output tensors are not supported", __func__);
2192 }
2193
2194 bool isSupported = false;
2195 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2196 IsDivisionSupported,
2197 data.m_Backends,
2198 isSupported,
2199 input0.GetTensorInfo(),
2200 input1.GetTensorInfo(),
2201 outputInfo);
2202 if (!isSupported)
2203 {
2204 return false;
2205 }
2206
2207 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2208 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2209
2210 if (endLayer)
2211 {
Derek Lamberti57ea6d12019-12-19 15:45:35 +00002212 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002213 if (!isReshapeSupported)
2214 {
2215 return false;
2216 }
2217
Mike Kelly46272802019-08-14 17:00:48 +01002218 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2219 }
2220 return Fail("%s: ProcessActivation failed", __func__);
2221}
2222
2223template<typename HalPolicy,
2224 typename Operation = typename HalPolicy::Operation,
2225 typename Model = typename HalPolicy::Model>
2226bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2227{
2228 using Operand = typename HalPolicy::Operand;
2229
2230 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2231 if (!input.IsValid())
2232 {
2233 return Fail("%s: Operation has invalid inputs", __func__);
2234 }
2235
2236 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2237 if (!outputOperand)
2238 {
2239 return Fail("%s: Operation has invalid outputs", __func__);
2240 }
2241
2242 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2243 if (IsDynamicTensor(outputInfo))
2244 {
2245 return Fail("%s: Dynamic output tensors are not supported", __func__);
2246 }
2247
2248 bool isSupported = false;
2249 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2250 IsFloorSupported,
2251 data.m_Backends,
2252 isSupported,
2253 input.GetTensorInfo(),
2254 outputInfo);
2255 if (!isSupported)
2256 {
2257 return false;
2258 }
2259
2260 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2261 assert(layer != nullptr);
2262 input.Connect(layer->GetInputSlot(0));
2263
2264 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2265}
2266
/// Converts an NNAPI FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
///
/// Input 0 is the data tensor, input 1 the 2D weights, input 2 the 1D bias and
/// input 3 the fused activation function. Inputs of rank > 2 are flattened to 2D
/// (via an inserted Reshape layer) before being fed to the FullyConnected layer.
/// Weights and bias must be constant tensors; dynamic outputs are rejected.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to 2D to match the weights; throws if the shapes
    // are incompatible, which we report as a conversion failure.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Check that the flattened input, the weights and the expected output are
    // dimensionally consistent before asking the backends for support.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    // Append the fused activation (if any) after the fully-connected layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // For rank > 2 inputs, insert a Reshape layer to flatten the input down
        // to the 2D shape computed above before connecting it.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2381
2382template<typename HalPolicy,
2383 typename Operation = typename HalPolicy::Operation,
2384 typename Model = typename HalPolicy::Model>
2385bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2386{
Mike Kelly999e2092019-08-15 10:46:46 +01002387 if (operation.inputs.size() != 1)
2388 {
2389 return Fail("%s: Optional inputs are not supported", __func__);
2390 }
2391
Mike Kelly46272802019-08-14 17:00:48 +01002392 using Operand = typename HalPolicy::Operand;
2393
2394 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2395 if (!input.IsValid())
2396 {
2397 return Fail("%s: Operation has invalid inputs", __func__);
2398 }
2399
2400 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2401 if (!output)
2402 {
2403 return Fail("%s: Could not read output 0", __func__);
2404 }
2405
2406 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2407 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2408
2409 if (IsDynamicTensor(outputInfo))
2410 {
2411 return Fail("%s: Dynamic output tensors are not supported", __func__);
2412 }
2413 if (outputInfo.GetNumDimensions() != 4u)
2414 {
2415 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2416 }
2417
2418 armnn::L2NormalizationDescriptor desc;
2419 desc.m_DataLayout = armnn::DataLayout::NHWC;
2420
2421 bool isSupported = false;
2422 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2423 IsL2NormalizationSupported,
2424 data.m_Backends,
2425 isSupported,
2426 inputInfo,
2427 outputInfo,
2428 desc);
2429 if (!isSupported)
2430 {
2431 return false;
2432 }
2433
2434 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2435 assert(layer != nullptr);
2436 input.Connect(layer->GetInputSlot(0));
2437
2438 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2439}
2440
2441template<typename HalPolicy,
2442 typename Operation = typename HalPolicy::Operation,
2443 typename Model = typename HalPolicy::Model>
2444bool ConvertLocalResponseNormalization(const Operation& operation,
2445 const Model& model,
2446 ConversionData& data)
2447{
Mike Kelly999e2092019-08-15 10:46:46 +01002448 if (operation.inputs.size() != 5)
2449 {
2450 return Fail("%s: Optional inputs are not supported", __func__);
2451 }
2452
Mike Kelly46272802019-08-14 17:00:48 +01002453 using Operand = typename HalPolicy::Operand;
2454 using OperandType = typename HalPolicy::OperandType;
2455
2456 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2457 if (!input.IsValid())
2458 {
2459 return Fail("%s: Operation has invalid inputs", __func__);
2460 }
2461
2462 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2463 if (!output)
2464 {
2465 return Fail("%s: Could not read output 0", __func__);
2466 }
2467
2468 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2469 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2470
2471 if (IsDynamicTensor(outputInfo))
2472 {
2473 return Fail("%s: Dynamic output tensors are not supported", __func__);
2474 }
2475 if (outputInfo.GetNumDimensions() != 4u)
2476 {
2477 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2478 }
2479
2480 armnn::NormalizationDescriptor descriptor;
2481 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2482 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2483 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2484
2485 if (!input.IsValid() ||
2486 !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2487 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2488 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2489 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2490 {
2491 return Fail("%s: Operation has invalid inputs", __func__);
2492 }
2493
2494 // ArmNN expects normSize to be the full size of the normalization
2495 // window rather than the radius as in AndroidNN.
2496 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2497
2498 bool isSupported = false;
2499 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2500 IsNormalizationSupported,
2501 data.m_Backends,
2502 isSupported,
2503 inputInfo,
2504 outputInfo,
2505 descriptor);
2506 if (!isSupported)
2507 {
2508 return false;
2509 }
2510
2511
2512 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2513 assert(layer != nullptr);
2514 input.Connect(layer->GetInputSlot(0));
2515
2516 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2517}
2518
2519template<typename HalPolicy,
2520 typename Operation = typename HalPolicy::Operation,
2521 typename Model = typename HalPolicy::Model>
2522bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2523{
2524 using Operand = typename HalPolicy::Operand;
2525
2526 armnn::ActivationDescriptor desc;
2527 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2528
2529 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2530}
2531
2532template<typename HalPolicy,
2533 typename Operation = typename HalPolicy::Operation,
2534 typename Model = typename HalPolicy::Model>
2535bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
2536{
2537 using Operand = typename HalPolicy::Operand;
2538
2539 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2540 if (!input.IsValid())
2541 {
2542 return Fail("%s: Operation has invalid inputs", __func__);
2543 }
2544
2545 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2546 if (!output)
2547 {
2548 return Fail("%s: Could not read output 0", __func__);
2549 }
2550
2551 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2552 if (IsDynamicTensor(outputInfo))
2553 {
2554 return Fail("%s: Dynamic output tensors are not supported", __func__);
2555 }
2556
2557 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2558 if (!axisOperand)
2559 {
2560 return Fail("%s: Could not read input 1", __func__);
2561 }
2562
2563 std::vector<int32_t> axis;
2564 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
2565 {
2566 return Fail("%s: Input 1 has invalid values", __func__);
2567 }
2568
2569 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2570
2571 // Convert the axis to unsigned int and remove duplicates.
2572 unsigned int rank = inputInfo.GetNumDimensions();
2573 std::set<unsigned int> uniqueAxis;
2574 std::transform(axis.begin(), axis.end(),
2575 std::inserter(uniqueAxis, uniqueAxis.begin()),
2576 [rank](int i) -> unsigned int { return (i + rank) % rank; });
2577
2578 // Get the "keep dims" flag.
2579 int32_t keepDims = 0;
2580 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
2581 {
2582 return Fail("%s: Could not read input 2", __func__);
2583 }
2584
2585 armnn::MeanDescriptor descriptor;
2586 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
2587 descriptor.m_KeepDims = keepDims > 0;
2588
2589 bool isSupported = false;
2590 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2591 IsMeanSupported,
2592 data.m_Backends,
2593 isSupported,
2594 inputInfo,
2595 outputInfo,
2596 descriptor);
2597 if (!isSupported)
2598 {
2599 return false;
2600 }
2601
2602 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
2603 assert(layer != nullptr);
2604 input.Connect(layer->GetInputSlot(0));
2605
2606 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2607}
2608
2609template<typename HalPolicy,
2610 typename Operation = typename HalPolicy::Operation,
2611 typename Model = typename HalPolicy::Model>
2612bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2613{
2614 using Operand = typename HalPolicy::Operand;
2615
2616 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2617 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2618
2619 if (!input0.IsValid() || !input1.IsValid())
2620 {
2621 return Fail("%s: Operation has invalid inputs", __func__);
2622 }
2623
2624 // The FuseActivation parameter is always the input index 2
2625 // and it should be optional
2626 ActivationFn activationFunction;
2627 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2628 {
2629 return Fail("%s: Operation has invalid inputs", __func__);
2630 }
2631
2632 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2633
2634 if (outputOperand == nullptr)
2635 {
2636 return false;
2637 }
2638
2639 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2640 if (IsDynamicTensor(outputInfo))
2641 {
2642 return Fail("%s: Dynamic output tensors are not supported", __func__);
2643 }
2644
2645 bool isSupported = false;
2646 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2647 IsMultiplicationSupported,
2648 data.m_Backends,
2649 isSupported,
2650 input0.GetTensorInfo(),
2651 input1.GetTensorInfo(),
2652 outputInfo);
2653 if (!isSupported)
2654 {
2655 return false;
2656 }
2657
2658 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2659 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2660
2661 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2662 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2663
2664 if (endLayer != nullptr)
2665 {
Derek Lamberti57ea6d12019-12-19 15:45:35 +00002666 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002667 if (!isReshapeSupported)
2668 {
2669 return false;
2670 }
2671
Mike Kelly46272802019-08-14 17:00:48 +01002672 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2673 }
2674 else
2675 {
2676 return Fail("%s: ProcessActivation failed", __func__);
2677 }
2678}
2679
2680template<typename HalPolicy,
2681 typename Operation = typename HalPolicy::Operation,
2682 typename Model = typename HalPolicy::Model>
2683bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
2684{
2685 using Operand = typename HalPolicy::Operand;
2686
Mike Kelly3c673942019-07-25 09:26:06 +01002687 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2688 if (!input.IsValid())
2689 {
2690 return Fail("%s: Operation has invalid inputs", __func__);
2691 }
2692
2693 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2694 unsigned int rank = inputInfo.GetNumDimensions();
2695
2696 armnn::PadDescriptor descriptor;
2697 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2698 {
2699 return Fail("%s: Could not convert paddings", __func__);
2700 }
2701
2702 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2703 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2704 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2705 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2706 {
2707 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2708 }
2709
Mike Kelly46272802019-08-14 17:00:48 +01002710 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002711 if (!output)
2712 {
2713 return Fail("%s: Could not read output", __func__);
2714 }
2715
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002716 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002717 if (IsDynamicTensor(outputInfo))
2718 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002719 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002720 }
2721
2722 bool isSupported = false;
2723 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2724 IsPadSupported,
2725 data.m_Backends,
2726 isSupported,
2727 inputInfo,
2728 outputInfo,
2729 descriptor);
2730 if (!isSupported)
2731 {
2732 return false;
2733 }
2734
2735 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2736 assert(layer != nullptr);
2737 input.Connect(layer->GetInputSlot(0));
2738 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2739
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002740 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002741}
2742
Mike Kelly0a879362019-07-29 16:56:31 +01002743template<typename HalPolicy,
2744 typename Operation = typename HalPolicy::Operation,
Mike Kelly46272802019-08-14 17:00:48 +01002745 typename Model = typename HalPolicy::Model>
2746bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
2747{
2748 using Operand = typename HalPolicy::Operand;
2749
2750 const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
2751 const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2752 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2753
2754 if (inputOperand == nullptr
2755 || requestedShapeOperand == nullptr
2756 || outputOperand == nullptr)
2757 {
2758 return Fail("%s: Operation has invalid inputs", __func__);
2759 }
2760
2761 if (requestedShapeOperand->dimensions.size() != 1)
2762 {
2763 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
2764 __func__, requestedShapeOperand->dimensions.size());
2765 }
2766
2767 std::vector<int32_t> targetDimensions;
2768 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
2769 {
2770 return Fail("%s: Could not read values of input 1", __func__);
2771 }
2772
2773 const Shape inputOperandShape = GetOperandShape(*inputOperand);
2774
2775 Shape requestedShape;
2776 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
2777 // function that resolves these values into a fully specified tensor shape.
2778 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
2779 {
2780 return Fail("%s: Failed to resolve the requested shape", __func__);
2781 }
2782
2783 const Shape outputOperandShape = GetOperandShape(*outputOperand);
2784 if (!SameShape(requestedShape, outputOperandShape))
2785 {
2786 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
2787 }
2788
2789 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2790 if (!input.IsValid())
2791 {
2792 return Fail("%s: Could not read input 0", __func__);
2793 }
2794
2795 armnn::ReshapeDescriptor reshapeDescriptor;
2796 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
2797 requestedShape.dimensions.data());
2798
2799 bool isSupported = false;
2800 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2801 IsReshapeSupported,
2802 data.m_Backends,
2803 isSupported,
2804 input.GetTensorInfo(),
Kevin May784a04b2019-12-12 16:33:31 +00002805 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01002806 reshapeDescriptor);
2807 if (!isSupported)
2808 {
2809 return false;
2810 }
2811
2812 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2813 assert(layer != nullptr);
2814 input.Connect(layer->GetInputSlot(0));
2815
2816 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2817}
2818
2819template<typename HalPolicy,
2820 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002821 typename Model = typename HalPolicy::Model>
2822bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2823{
Mike Kelly46272802019-08-14 17:00:48 +01002824 using Operand = typename HalPolicy::Operand;
2825
Mike Kelly0a879362019-07-29 16:56:31 +01002826 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2827 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2828
2829 if (!input0.IsValid() || !input1.IsValid())
2830 {
2831 return Fail("%s: Operation has invalid inputs", __func__);
2832 }
2833
2834 // The FuseActivation parameter is always the input index 2
2835 // and it should be optional
2836 ActivationFn activationFunction;
2837 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2838 {
2839 return Fail("%s: Operation has invalid inputs", __func__);
2840 }
2841
2842 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2843 if (!output)
2844 {
2845 return Fail("%s: Could not read output 0", __func__);
2846 }
2847
2848 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2849 if (IsDynamicTensor(outputInfo))
2850 {
2851 return Fail("%s: Dynamic output tensors are not supported", __func__);
2852 }
2853
2854 bool isSupported = false;
2855 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2856 IsSubtractionSupported,
2857 data.m_Backends,
2858 isSupported,
2859 input0.GetTensorInfo(),
2860 input1.GetTensorInfo(),
2861 outputInfo);
2862 if (!isSupported)
2863 {
2864 return false;
2865 }
2866
2867 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2868 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2869
2870 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2871 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2872
2873 if (endLayer)
2874 {
Derek Lamberti57ea6d12019-12-19 15:45:35 +00002875 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002876 if (!isReshapeSupported)
2877 {
2878 return false;
2879 }
Mike Kelly0a879362019-07-29 16:56:31 +01002880 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2881 }
2882
2883 return Fail("%s: ProcessActivation failed", __func__);
2884}
2885
Finn Williams23b87b32019-07-30 11:44:05 +01002886template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002887 typename Operation = typename HalPolicy::Operation,
2888 typename Model = typename HalPolicy::Model>
2889bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2890{
2891 using Operand = typename HalPolicy::Operand;
2892
2893 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2894 if (!input.IsValid())
2895 {
2896 return Fail("%s: Operation has invalid inputs", __func__);
2897 }
2898
2899 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2900 unsigned int rank = inputInfo.GetNumDimensions();
2901 if (rank > 4)
2902 {
2903 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2904 }
2905
2906 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2907 if (!output)
2908 {
2909 return Fail("%s: Could not read output 0", __func__);
2910 }
2911
2912 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
2913 {
2914 return Fail("%s: Dynamic output tensors are not supported", __func__);
2915 }
2916
2917 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
2918 // if the operand index is out of bounds.
2919 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
2920
2921 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2922
2923 std::vector<int32_t> axis;
2924 if (!axisOperand)
2925 {
2926 axis.assign(dimensionSequence,
2927 dimensionSequence + rank);
2928 }
2929 else
2930 {
2931 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
2932 }
2933
2934 std::vector<uint32_t> outputDims;
2935 for (unsigned int i = 0; i < rank; i++)
2936 {
2937 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
2938 auto currentDimension = inputInfo.GetShape()[i];
2939 if (skipSqueeze || currentDimension != 1)
2940 {
2941 outputDims.push_back(currentDimension);
2942 }
2943 }
2944
2945 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
2946
2947 armnn::TensorInfo outputInfo = inputInfo;
2948 outputInfo.SetShape(outShape);
2949
2950 armnn::ReshapeDescriptor reshapeDesc;
2951 reshapeDesc.m_TargetShape = outputInfo.GetShape();
2952
2953 bool isSupported = false;
2954 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2955 IsReshapeSupported,
2956 data.m_Backends,
2957 isSupported,
2958 inputInfo,
Kevin May784a04b2019-12-12 16:33:31 +00002959 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01002960 reshapeDesc);
2961 if (!isSupported)
2962 {
2963 return false;
2964 }
2965
2966 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
2967 assert(layer != nullptr);
2968 input.Connect(layer->GetInputSlot(0));
2969
2970 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2971}
2972
2973template<typename HalPolicy,
2974 typename Operation = typename HalPolicy::Operation,
2975 typename Model = typename HalPolicy::Model>
2976bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2977{
2978 using Operand = typename HalPolicy::Operand;
2979
2980 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2981 if (!input.IsValid())
2982 {
2983 return Fail("%s: Operation has invalid inputs", __func__);
2984 }
2985
2986 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2987 unsigned int rank = inputInfo.GetNumDimensions();
2988 if (rank > 4)
2989 {
2990 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
2991 }
2992
2993 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2994 if (!output)
2995 {
2996 return Fail("%s: Could not read output 0", __func__);
2997 }
2998
2999 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3000 if (IsDynamicTensor(outputInfo))
3001 {
3002 return Fail("%s: Dynamic output tensors are not supported", __func__);
3003 }
3004
3005 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3006 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3007 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
3008
3009 std::vector<int32_t> beginValues;
3010 std::vector<int32_t> endValues;
3011 std::vector<int32_t> stridesValues;
3012
3013 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
3014 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
3015 {
3016 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3017 {
3018 return false;
3019 }
3020
3021 if (operandValues.size() != rank)
3022 {
3023 return false;
3024 }
3025
3026 return true;
3027 };
3028
3029 if (!ValidateInputOperands(*beginOperand, beginValues)
3030 || !ValidateInputOperands(*endOperand, endValues)
3031 || !ValidateInputOperands(*stridesOperand, stridesValues))
3032 {
3033 return Fail("%s: Operation has invalid input operand", __func__);
3034 }
3035
3036 // Stride cannot have value '0'
3037 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3038 {
3039 return Fail("%s: Stride must be non-zero value.", __func__);
3040 }
3041
3042 armnn::StridedSliceDescriptor descriptor;
3043 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3044 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3045 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3046 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3047
3048 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3049 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3050 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3051 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3052 {
3053 return Fail("%s: Operation has invalid inputs", __func__);
3054 }
3055
3056 bool isSupported = false;
3057 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3058 IsStridedSliceSupported,
3059 data.m_Backends,
3060 isSupported,
3061 inputInfo,
3062 outputInfo,
3063 descriptor);
3064 if (!isSupported)
3065 {
3066 return false;
3067 }
3068
3069 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3070 assert(layer != nullptr);
3071 input.Connect(layer->GetInputSlot(0));
3072
3073 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3074}
3075
3076template<typename HalPolicy,
3077 typename Operation = typename HalPolicy::Operation,
3078 typename Model = typename HalPolicy::Model>
3079bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
3080{
3081 using Operand = typename HalPolicy::Operand;
3082
3083 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3084 if (!input.IsValid())
3085 {
3086 return Fail("%s: Operation has invalid inputs", __func__);
3087 }
3088
3089 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3090 unsigned int rank = inputInfo.GetNumDimensions();
3091 if (rank > 4)
3092 {
3093 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3094 }
3095
3096 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3097 // if the operand index is out of bounds.
3098 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3099
3100 std::vector<int32_t> perm(rank);
3101 if (!permOperand)
3102 {
3103 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3104 for (unsigned int i = rank; i > 0; i--)
3105 {
3106 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3107 }
3108 }
3109 else
3110 {
3111 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3112 }
3113
3114 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3115
3116 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3117 if (!permutationVector.IsEqual(NHWCToArmNN)
3118 && !permutationVector.IsEqual(ArmNNToNHWC)
3119 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3120 {
3121 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3122 }
3123
3124 armnn::PermuteDescriptor permuteDesc;
3125 permuteDesc.m_DimMappings = permutationVector;
3126
3127 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3128 if (!output)
3129 {
3130 return Fail("%s: Could not read output 0", __func__);
3131 }
3132
3133 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3134
3135 bool isSupported = false;
3136 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3137 IsPermuteSupported,
3138 data.m_Backends,
3139 isSupported,
3140 inputInfo,
3141 outputInfo,
3142 permuteDesc);
3143 if (!isSupported)
3144 {
3145 return false;
3146 }
3147
3148 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3149 assert(layer != nullptr);
3150 input.Connect(layer->GetInputSlot(0));
3151
3152 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3153}
3154
3155template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003156 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003157 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003158 typename HalModel = typename HalPolicy::Model>
3159bool ConvertBatchToSpaceNd(const HalOperation& operation,
3160 const HalModel& model,
3161 ConversionData& data)
3162{
Finn Williams23b87b32019-07-30 11:44:05 +01003163
3164 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3165 if (!input.IsValid())
3166 {
3167 return Fail("%s: Operation has invalid inputs", __func__);
3168 }
3169
3170 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3171 if (!output)
3172 {
3173 return Fail("%s: Could not read output 0", __func__);
3174 }
3175
3176 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3177 if (IsDynamicTensor(outputInfo))
3178 {
3179 return Fail("%s: Dynamic output tensors are not supported", __func__);
3180 }
3181
3182 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3183 if (!blockOperand)
3184 {
3185 return Fail("%s: Could not read input 1", __func__);
3186 }
3187
3188 // Convert the block operand to int32
3189 std::vector<int32_t> block;
3190 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3191 {
3192 return Fail("%s: Input 1 has invalid values", __func__);
3193 }
3194
3195 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3196
3197 unsigned int rank = inputInfo.GetNumDimensions();
3198 if (rank != 4)
3199 {
3200 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3201 }
3202
3203 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3204 {
3205 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3206 " greater than or equal to 1", __func__);
3207 }
3208
3209 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3210 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3211 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3212
3213 if (Is12Operand(*output))
3214 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003215 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003216 }
3217 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3218 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3219
3220 bool isSupported = false;
3221 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3222 IsBatchToSpaceNdSupported,
3223 data.m_Backends,
3224 isSupported,
3225 inputInfo,
3226 outputInfo,
3227 batchToSpaceNdDesc);
3228 if (!isSupported)
3229 {
3230 return false;
3231 }
3232
3233 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3234 assert(layer != nullptr);
3235 input.Connect(layer->GetInputSlot(0));
3236
3237 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3238}
Mike Kelly0a879362019-07-29 16:56:31 +01003239
Finn Williamsd74c5052019-07-30 17:06:00 +01003240template<typename HalPolicy,
3241 typename HalOperation = typename HalPolicy::Operation,
3242 typename HalOperand = typename HalPolicy::Operand,
3243 typename HalModel = typename HalPolicy::Model>
3244bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3245{
3246 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3247 if (!input.IsValid())
3248 {
3249 return Fail("%s: Operation has invalid inputs", __func__);
3250 }
3251
3252 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3253 unsigned int rank = inputInfo.GetNumDimensions();
3254 unsigned int spatialDim = rank - 2;
3255
3256 if (rank != 4)
3257 {
3258 Fail("%s: Only inputs with rank 4 are supported", __func__);
3259 }
3260
3261 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3262 if (!output)
3263 {
3264 return Fail("%s: Could not read output 0", __func__);
3265 }
3266
3267 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3268 if (IsDynamicTensor(outputInfo))
3269 {
3270 return Fail("%s: Dynamic output tensors are not supported", __func__);
3271 }
3272
3273 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3274 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3275
3276 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3277 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3278 {
3279 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3280 }
3281
3282 std::vector<int32_t> blockShape;
3283 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3284 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3285 {
3286 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3287 }
3288
3289 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3290 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3291 {
3292 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3293 }
3294
3295 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3296 std::vector<int32_t> paddings;
3297 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3298 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3299 {
3300 int paddingBeforeInput = paddings[i];
3301 int paddingAfterInput = paddings[i + 1];
3302 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3303 {
3304 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3305 }
3306
3307 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3308 }
3309
3310 armnn::SpaceToBatchNdDescriptor descriptor;
3311 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3312 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3313 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3314
3315 if (Is12Operand(*output))
3316 {
3317 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3318 }
3319
3320 bool isSupported = false;
3321 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3322 IsSpaceToBatchNdSupported,
3323 data.m_Backends,
3324 isSupported,
3325 inputInfo,
3326 outputInfo,
3327 descriptor);
3328 if (!isSupported)
3329 {
3330 return false;
3331 }
3332
3333 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3334 assert(layer != nullptr);
3335 input.Connect(layer->GetInputSlot(0));
3336
3337 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3338}
3339
saoste01b8471482018-10-10 09:44:51 +01003340} // namespace armnn_driver