blob: 760543ad0efdf36d682a2e224c5f5b70fc377f14 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
15#include <armnnUtils/Permute.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Aggregates the state threaded through model conversion: the candidate backends,
// the ArmNN network being built, the output slot created for each operand converted
// so far, and the memory pools backing operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends the converted layers may be assigned to (checked in order).
    const std::vector<armnn::BackendId> m_Backends;
    // Network under construction; starts out empty (null deleter pair).
    armnn::INetworkPtr m_Network;
    // Output slots produced during conversion, one entry per operand converted so far.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Run-time memory pools that back CONSTANT_REFERENCE operand data.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps the output slot (plus its tensor info) that will feed a layer input during
// conversion. An invalid handle signals that the input operand could not be resolved.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True if this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds an armnn::ConstTensor for a constant operand, optionally owning a swizzled
// copy of the data when a permutation had to be applied to it.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is disallowed because the pin may own the swizzled data buffer
    // (and the ConstTensor would still point at the old buffer); moving is fine.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string; args are perfectly forwarded to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over 'backends' in order; the first backend whose layer-support object reports
// the layer as supported wins and leaves 'supported' set to true. Unsupported results and
// unregistered backends are logged via ALOGD. An InvalidArgumentException thrown by the
// support check is rethrown with extra context attached.
// NOTE: this is a macro (not a function) so that __VA_ARGS__ can forward an arbitrary
// Is*Supported signature; 'supported' must be an lvalue bool supplied by the caller.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000169template<typename HalOperand>
170armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true if the given HAL 1.2 operand type is supported by this driver
// (scalar BOOL plus the tensor types the conversion code can handle).
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
196
// HAL 1.0 has no BOOL operand type, so the 1.0 overload always returns false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
201
// Checks if an operand is a HAL 1.2 operand; always false for the 1.0 overload.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
206
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true if the HAL 1.2 operand holds a boolean value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a HAL 1.2 Operand; always true for this overload.
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
221
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100222template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000223armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
224 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100225 armnn::TensorInfo reshapeInfo)
226{
227 armnn::ReshapeDescriptor reshapeDescriptor;
228 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
229
230 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
231 BOOST_ASSERT(reshapeLayer != nullptr);
232
233 // Attach the input layer to the reshape layer
234 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
235 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
236
237 return *reshapeLayer;
238}
239
// Connects input0 and input1 to the two input slots of 'startLayer', inserting a reshape
// in front of the lower-rank input (prepending degenerate size-1 dimensions) when the two
// ranks differ, so the inputs can be broadcast against each other.
// @return false if the required reshape is not supported by any of the given backends.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the original dimensions into the tail of a rank-'maxInputDimensions' shape,
    // leaving the leading 'sizeDifference' dimensions as 1.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
331
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000332void CalcPadding(uint32_t input,
333 uint32_t kernel,
334 uint32_t stride,
335 uint32_t& outPadHead,
336 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100337 android::nn::PaddingScheme scheme)
338{
339 int32_t padHead;
340 int32_t padTail;
341 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
342 outPadHead = boost::numeric_cast<uint32_t>(padHead);
343 outPadTail = boost::numeric_cast<uint32_t>(padTail);
344}
345
#ifdef ARMNN_ANDROID_NN_V1_2

// Computes head/tail padding for one spatial dimension, taking dilation into account (HAL 1.2).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head = 0;
    int32_t tail = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

// Computes head/tail padding for transpose convolution; the outputs stay signed,
// matching what calculateExplicitPaddingTransposeConv produces.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
365
Matthew Bentham912b3622019-05-03 15:49:14 +0100366Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100367{
368 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100369 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100370 shape.dimensions = operand.dimensions;
371 shape.scale = operand.scale;
372 shape.offset = operand.zeroPoint;
373 return shape;
374}
375
#ifdef ARMNN_ANDROID_NN_V1_2

// Builds an android::nn::Shape describing a HAL 1.2 operand (type, dimensions, quantization params).
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
389
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        // Scale every per-axis weight scale by the input scale, then install the result
        // (and the weights' quantization dimension) on the bias info.
        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        // Per-tensor quantization: the bias scale should equal input_scale * weight_scale.
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Tolerate up to 1% divergence and silently fix it up; larger divergences are left as-is
            // so that downstream validation can reject them.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
429
// 4D Tensor Permutations
// Used when converting between the Android NN layout (NHWC) and ArmNN's default layout.
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });  // exchanges dimensions 1 and 2

// 3D Permutation Vectors
// RotateTensorLeft/Right are inverses of each other (used as pre-/post-permutation pairs).
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
440
441template<typename OSlot>
442armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
443 const armnn::PermutationVector& mappings)
444{
445 // Add swizzle layer
446 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
447
448 BOOST_ASSERT(layer != nullptr);
449
450 // Connect input to swizzle layer
451 input.Connect(layer->GetInputSlot(0));
452
453 // Setup swizzled output
454 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
455 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
456
457 return *layer;
458}
459
460void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
461{
462 // Add swizzle layer
463 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
464 // Connect swizzled input to layer
465 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
466}
467
468armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
469{
470 // Add deswizzle layer
471 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
472 return deswizzleLayer;
473}
474
// Swizzles the input into 'firstLayer' and deswizzles the output of 'lastLayer'.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// @return The deswizzle layer appended after 'lastLayer'.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
484
// Single-layer convenience overload: swizzle into and deswizzle out of the same layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
491
492bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
493 const armnn::TensorShape & outputShape,
494 uint32_t concatDim)
495{
496 // Validate the output shape is correct given the input shapes (which have just been validated)
497 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
498 if (outputShape.GetNumDimensions() != numDimensions)
499 {
500 return Fail("%s: Output shape has wrong number of dimensions", __func__);
501 }
502
503 unsigned int outputSizeAlongConcatenatedDimension = 0;
504 for (unsigned int i = 0; i < inputShapes.size(); i++)
505 {
506 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
507 }
508
509 for (unsigned int i = 0; i < numDimensions; ++i)
510 {
511 if (i == concatDim)
512 {
513 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
514 {
515 return Fail(
516 "%s: Invalid output shape for dimension %d (%d != %d)",
517 __func__,
518 i,
519 outputShape[i],
520 outputSizeAlongConcatenatedDimension);
521 }
522 }
523 else
524 {
525 if (outputShape[i] != inputShapes[0][i])
526 {
527 return Fail("%s: Invalid output shape", __func__);
528 }
529 }
530 }
531
532 return true;
533}
534
// Tensors with rank below 3 must be reshaped before concatenation permutations can be applied.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
539
arovir01b0717b52018-09-05 17:03:25 +0100540void SwizzleInputs(armnn::INetwork& network,
541 std::vector<LayerInputHandle>& inputs,
542 std::vector<armnn::TensorShape>& inputShapes,
543 const armnn::PermutationVector& mapping)
544{
545 if (!mapping.IsEqual(IdentityPermutation4D))
546 {
547 size_t nInputs = inputs.size();
548 for (size_t i=0; i<nInputs; ++i)
549 {
550 // add swizzle layer
551 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
552 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
553 auto& outputInfo = outputSlot.GetTensorInfo();
554 // replace inputs with the swizzled ones
555 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
556 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
557 }
558 }
559}
560
narpra01f176d5a2018-11-18 20:17:48 +0000561bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
562 int32_t & concatDimension,
563 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100564{
narpra01f176d5a2018-11-18 20:17:48 +0000565 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100566 BOOST_ASSERT(numberOfDimensions >= 3);
567
568 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000569 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
570 // or along dimension 0 or 2 for a 3-D tensor.
571 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100572 {
narpra01f176d5a2018-11-18 20:17:48 +0000573 concatDimension = 1;
574 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
575 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100576 }
narpra01f176d5a2018-11-18 20:17:48 +0000577 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100578 {
narpra01f176d5a2018-11-18 20:17:48 +0000579 concatDimension = 0;
580 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
581 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100582 }
narpra01f176d5a2018-11-18 20:17:48 +0000583 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100584}
585
586} // anonymous namespace
587
588namespace armnn_driver
589{
590
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
601
602} // namespace armnn_driver
603
604///
605/// Utility templates
606///
607
608namespace armnn_driver
609{
610
611using namespace android::nn;
612
// Looks up the operand feeding input 'inputIndex' of 'operation'.
// @param failOnIndexOutOfBounds When true, logs a failure before returning nullptr
//                               for an out-of-range index.
// @return Pointer to the operand, or nullptr if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    const uint32_t operandIndex = operation.inputs[inputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size());

    return &model.operands[operandIndex];
}
634
// Looks up the operand produced as output 'outputIndex' of 'operation'.
// @return Pointer to the operand, or nullptr (after logging) if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    const uint32_t operandIndex = operation.outputs[outputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size());

    return &model.operands[operandIndex];
}
654
// Returns a read-only pointer to the data backing a constant operand, resolved from
// either the model's embedded operand values or one of the conversion memory pools.
// @param optional If true, a NO_VALUE operand yields nullptr without logging a failure.
// @return Pointer to the value, or nullptr if no value can be obtained.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value is handled like any other invalid lifetime.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
701
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100702template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100703 typename HalOperation = typename HalPolicy::Operation,
704 typename HalModel = typename HalPolicy::Model,
705 typename HalOperandType = typename HalPolicy::OperandType>
706bool GetOperandType(const HalOperation& operation,
707 uint32_t inputIndex,
708 const HalModel& model,
709 HalOperandType& type)
710{
711 using HalOperand = typename HalPolicy::Operand;
712
713 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
714 if (!operand)
715 {
716 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
717 }
718
719 type = operand->type;
720 return true;
721}
722
// Returns true if the operand's lifetime marks it as constant data: an embedded copy,
// a memory-pool reference, or an omitted (NO_VALUE) optional operand.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
735
// Converts a constant HAL operand into a ConstTensorPin referencing the operand's data.
// @param dimensionMappings Permutation to apply to the tensor data (default: none).
// @param overrideTensorShape If non-null, replaces the shape derived from the operand.
// @param optional If true, a missing value yields an invalid pin flagged as optional
//                 rather than an error.
// @return A valid pin on success, otherwise an invalid pin (with the reason logged).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Non-optional operands must have a constant lifetime to be convertible here.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        // Remap the quantization dimension through the same permutation applied to the data.
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
784
785template<typename HalPolicy,
786 typename HalOperation = typename HalPolicy::Operation,
787 typename HalModel = typename HalPolicy::Model>
788ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
789 uint32_t inputIndex,
790 const HalModel& model,
791 const ConversionData& data,
792 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
793 const armnn::TensorShape* overrideTensorShape = nullptr,
794 bool optional = false)
795{
796 using HalOperand = typename HalPolicy::Operand;
797
798 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
799 if (!operand)
800 {
801 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
802 return ConstTensorPin();
803 }
804 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
805 model,
806 data,
807 dimensionMappings,
808 overrideTensorShape,
809 optional);
810}
811
812template<typename HalPolicy,
813 typename OutputType,
814 typename HalOperandType = typename HalPolicy::OperandType,
815 typename HalOperation = typename HalPolicy::Operation,
816 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100817bool GetInputScalar(const HalOperation& operation,
818 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100819 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100820 OutputType& outValue,
821 const HalModel& model,
822 const ConversionData& data)
823{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100824 using HalOperand = typename HalPolicy::Operand;
825
826 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100827 if (!operand)
828 {
829 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
830 }
831
832 if (operand->type != type)
833 {
834 return Fail("%s: unexpected operand type: %s (should be %s)",
835 __func__, toString(operand->type).c_str(), toString(type).c_str());
836 }
837
838 if (operand->location.length != sizeof(OutputType))
839 {
840 return Fail("%s: incorrect operand location length: %i (should be %i)",
841 __func__, operand->location.length, sizeof(OutputType));
842 }
843
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100844 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100845 if (!valueAddress)
846 {
847 return Fail("%s: failed to get address for operand", __func__);
848 }
849
850 outValue = *(static_cast<const OutputType*>(valueAddress));
851 return true;
852}
853
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100854template<typename HalPolicy,
855 typename HalOperation = typename HalPolicy::Operation,
856 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100857bool GetInputInt32(const HalOperation& operation,
858 uint32_t inputIndex,
859 int32_t& outValue,
860 const HalModel& model,
861 const ConversionData& data)
862{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100863 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100864}
865
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100866template<typename HalPolicy,
867 typename HalOperation = typename HalPolicy::Operation,
868 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100869bool GetInputFloat32(const HalOperation& operation,
870 uint32_t inputIndex,
871 float& outValue,
872 const HalModel& model,
873 const ConversionData& data)
874{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100875 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100876}
877
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100878template<typename HalPolicy,
879 typename HalOperation = typename HalPolicy::Operation,
880 typename HalOperandType = typename HalPolicy::OperandType,
881 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100882bool GetInputActivationFunctionImpl(const HalOperation& operation,
883 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100884 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100885 ActivationFn& outActivationFunction,
886 const HalModel& model,
887 const ConversionData& data)
888{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100889 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100890 {
891 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
892 __func__,
893 toString(type).c_str(),
894 toString(OperandType::INT32).c_str(),
895 toString(OperandType::TENSOR_INT32).c_str());
896 }
897
898 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100899 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100900 {
901 return Fail("%s: failed to get activation input value", __func__);
902 }
903 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
904 return true;
905}
906
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100907template<typename HalPolicy,
908 typename HalOperation = typename HalPolicy::Operation,
909 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100910bool GetInputActivationFunction(const HalOperation& operation,
911 uint32_t inputIndex,
912 ActivationFn& outActivationFunction,
913 const HalModel& model,
914 const ConversionData& data)
915{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100916 return GetInputActivationFunctionImpl<HalPolicy>(operation,
917 inputIndex,
918 HalPolicy::OperandType::INT32,
919 outActivationFunction,
920 model,
921 data);
arovir01b0717b52018-09-05 17:03:25 +0100922}
923
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100924template<typename HalPolicy,
925 typename HalOperation = typename HalPolicy::Operation,
926 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100927bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
928 uint32_t inputIndex,
929 ActivationFn& outActivationFunction,
930 const HalModel& model,
931 const ConversionData& data)
932{
933 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100934 return GetInputActivationFunctionImpl<HalPolicy>(operation,
935 inputIndex,
936 HalPolicy::OperandType::INT32,
937 outActivationFunction,
938 model,
939 data);
arovir01b0717b52018-09-05 17:03:25 +0100940}
941
942
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100943template<typename HalPolicy,
944 typename HalOperation = typename HalPolicy::Operation,
945 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100946bool GetOptionalInputActivation(const HalOperation& operation,
947 uint32_t inputIndex,
948 ActivationFn& activationFunction,
949 const HalModel& model,
950 const ConversionData& data)
951{
952 if (operation.inputs.size() <= inputIndex)
953 {
954 activationFunction = ActivationFn::kActivationNone;
955 }
956 else
957 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100959 {
960 return Fail("%s: Operation has invalid inputs", __func__);
961 }
962 }
963 return true;
964}
965
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100966template<typename HalPolicy,
967 typename ConvolutionDescriptor,
968 typename HalOperation = typename HalPolicy::Operation,
969 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100970bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
971 uint32_t dilationXIndex,
972 ConvolutionDescriptor& descriptor,
973 const HalModel& model,
974 const ConversionData& data)
975{
976 bool success = true;
977 if (operation.inputs.size() >= dilationXIndex + 2)
978 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100979 success &= GetInputScalar<HalPolicy>(operation,
980 dilationXIndex,
981 HalPolicy::OperandType::INT32,
982 descriptor.m_DilationX,
983 model,
984 data);
985 success &= GetInputScalar<HalPolicy>(operation,
986 dilationXIndex + 1,
987 HalPolicy::OperandType::INT32,
988 descriptor.m_DilationY,
989 model,
990 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100991 }
992
993 return success;
994}
995
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100996template<typename HalPolicy,
997 typename HalOperand = typename HalPolicy::Operand,
998 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100999bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001000 std::vector<int32_t>& outValues,
1001 const HalModel& model,
1002 const ConversionData& data)
1003{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001004 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001005 {
1006 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1007 }
1008
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001009 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001010 if (!startAddress)
1011 {
1012 return Fail("%s: failed to get operand address", __func__, operand.type);
1013 }
1014
1015 // Check number of bytes is sensible
1016 const uint32_t numBytes = operand.location.length;
1017 if (numBytes % sizeof(int32_t) != 0)
1018 {
1019 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1020 __func__, numBytes, sizeof(int32_t));
1021 }
1022
1023 outValues.resize(numBytes / sizeof(int32_t));
1024 memcpy(outValues.data(), startAddress, numBytes);
1025 return true;
1026}
1027
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001028template<typename HalPolicy,
1029 typename HalOperation = typename HalPolicy::Operation,
1030 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001031bool GetInputPaddingScheme(const HalOperation& operation,
1032 uint32_t inputIndex,
1033 PaddingScheme& outPaddingScheme,
1034 const HalModel& model,
1035 const ConversionData& data)
1036{
1037 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001038 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001039 {
1040 return Fail("%s: failed to get padding scheme input value", __func__);
1041 }
1042
1043 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1044 return true;
1045}
1046
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves operation input @inputIndex to a LayerInputHandle wrapping the
// ArmNN output slot that produces the operand's data. Model inputs and
// temporaries map to a previously-tracked slot; constants are materialized as
// an ArmNN Constant layer. Returns an invalid (default) handle on any failure.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        // How the operand's data is obtained depends on its lifetime.
        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the Constant layer if some backend can run it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1159
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001160template<typename HalPolicy,
1161 typename HalOperation = typename HalPolicy::Operation,
1162 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001163bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1164 uint32_t operationOutputIndex,
1165 armnn::IConnectableLayer& layer,
1166 uint32_t layerOutputIndex,
1167 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001168 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001169{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001170 using HalOperand = typename HalPolicy::Operand;
1171
1172 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001173 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1174 {
1175 return false;
1176 }
1177
1178 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1179
1180 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1181 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1182
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001183 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001184
1185 return true;
1186}
1187
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001188template<typename HalPolicy,
1189 typename HalOperation = typename HalPolicy::Operation,
1190 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001191armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1192 uint32_t inputIndex,
1193 const HalModel& model,
1194 ConversionData& data)
1195{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196 using HalOperand = typename HalPolicy::Operand;
1197
1198 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001199 if (!operand)
1200 {
1201 return armnn::DataLayout::NHWC;
1202 }
1203
1204 if (!IsBool(*operand))
1205 {
1206 return armnn::DataLayout::NHWC;
1207 }
1208
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001209 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001210 if (!valueAddress)
1211 {
1212 return armnn::DataLayout::NHWC;
1213 }
1214
1215 if (*(static_cast<const bool*>(valueAddress)))
1216 {
1217 return armnn::DataLayout::NCHW;
1218 }
1219 else
1220 {
1221 return armnn::DataLayout::NHWC;
1222 }
1223}
1224
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001225template<typename HalPolicy,
1226 typename HalOperation = typename HalPolicy::Operation,
1227 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001228bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1229 uint32_t outputIndex,
1230 armnn::IConnectableLayer& layer,
1231 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001232 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001233{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001234 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1235 outputIndex,
1236 layer,
1237 outputIndex,
1238 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001239 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001240}
1241
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001242template<typename HalPolicy,
1243 typename HalOperation = typename HalPolicy::Operation,
1244 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001245bool ConvertToActivation(const HalOperation& operation,
1246 const char* operationName,
1247 const armnn::ActivationDescriptor& activationDesc,
1248 const HalModel& model,
1249 ConversionData& data)
1250{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001251 using HalOperand = typename HalPolicy::Operand;
1252
1253 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001254 if (!input.IsValid())
1255 {
1256 return Fail("%s: Input 0 is invalid", operationName);
1257 }
1258
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001259 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001260 if (!outputOperand)
1261 {
1262 return false;
1263 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001264
1265 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001266 if (IsDynamicTensor(outInfo))
1267 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001268 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001269 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001270
1271 bool isSupported = false;
1272 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1273 IsActivationSupported,
1274 data.m_Backends,
1275 isSupported,
1276 input.GetTensorInfo(),
1277 outInfo,
1278 activationDesc);
1279 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001280 {
1281 return false;
1282 }
1283
1284 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1285 BOOST_ASSERT(layer != nullptr);
1286 input.Connect(layer->GetInputSlot(0));
1287
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001288 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001289}
1290
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001291template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001292 typename HalOperation = typename HalPolicy::Operation,
1293 typename HalModel = typename HalPolicy::Model>
1294bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1295{
1296 armnn::ActivationDescriptor desc;
1297 desc.m_Function = armnn::ActivationFunction::ReLu;
1298
1299 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1300}
1301
1302template<typename HalPolicy,
1303 typename HalOperation = typename HalPolicy::Operation,
1304 typename HalModel = typename HalPolicy::Model>
1305bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1306{
1307 armnn::ActivationDescriptor desc;
1308 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1309 desc.m_A = 1.0f;
1310 desc.m_B = -1.0f;
1311
1312 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1313}
1314
1315template<typename HalPolicy,
1316 typename HalOperation = typename HalPolicy::Operation,
1317 typename HalModel = typename HalPolicy::Model>
1318bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1319{
1320 armnn::ActivationDescriptor desc;
1321 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1322 desc.m_A = 6.0f;
1323
1324 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1325}
1326
1327template<typename HalPolicy,
1328 typename HalOperation = typename HalPolicy::Operation,
1329 typename HalModel = typename HalPolicy::Model>
1330bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1331{
1332 armnn::ActivationDescriptor desc;
1333 desc.m_Function = armnn::ActivationFunction::TanH;
1334 desc.m_A = 1.0f; // android nn does not support tanH parameters
1335 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1336
1337 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1338}
1339
1340template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001341 typename HalOperation = typename HalPolicy::Operation,
1342 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001343bool ConvertPaddings(const HalOperation& operation,
1344 const HalModel& model,
1345 ConversionData& data,
1346 unsigned int rank,
1347 armnn::PadDescriptor& padDescriptor)
1348{
1349 using HalOperand = typename HalPolicy::Operand;
1350
1351 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1352 if (!paddingsOperand)
1353 {
1354 return Fail("%s: Could not read paddings operand", __func__);
1355 }
1356
1357 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1358 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1359 {
1360 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1361 }
1362
1363 std::vector<int32_t> paddings;
1364 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1365
1366 // add padding for each dimension of input tensor.
1367 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1368 {
1369 int paddingBeforeInput = paddings[i];
1370 int paddingAfterInput = paddings[i + 1];
1371
1372 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1373 {
1374 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1375 }
1376
1377 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1378 }
1379
1380 return true;
1381}
1382
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Shared conversion path for MAX/AVERAGE/L2 2-D pooling operations.
// Handles both NNAPI parameter layouts: explicit padding (>= 10 inputs) and
// implicit padding scheme (fewer inputs, padding computed via CalcPadding).
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // Explicit-padding layout:
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2+ may append an optional data-layout flag after the fixed parameters.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // Implicit-padding layout:
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Padding must be derived AFTER the data layout is known, since the
        // width/height indices into the input shape depend on it.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // ProcessActivation appends the fused activation layer (if any) and
    // returns the layer whose output should be tracked.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1501
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001502template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001503 typename HalOperation = typename HalPolicy::Operation,
1504 typename HalModel = typename HalPolicy::Model>
1505bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001506{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001507 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001508
1509 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1510 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1511
1512 if (!input0.IsValid() || !input1.IsValid())
1513 {
1514 return Fail("%s: Operation has invalid inputs", __func__);
1515 }
1516
1517 // The FuseActivation parameter is always the input index 2
1518 // and it should be optional
1519 ActivationFn activationFunction;
1520 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1521 {
1522 return Fail("%s: Operation has invalid inputs", __func__);
1523 }
1524
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001525 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001526 if (!outputOperand)
1527 {
1528 return false;
1529 }
1530
1531 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1532 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1533
1534 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1535 if (IsDynamicTensor(outputInfo))
1536 {
1537 return Fail("%s: Dynamic output tensors are not supported", __func__);
1538 }
1539
1540 bool isSupported = false;
1541 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1542 IsAdditionSupported,
1543 data.m_Backends,
1544 isSupported,
1545 inputInfo0,
1546 inputInfo1,
1547 outputInfo);
1548 if (!isSupported)
1549 {
1550 return false;
1551 }
1552
1553 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1554 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1555
1556 if (endLayer != nullptr)
1557 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01001558 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1559 if (!isReshapeSupported)
1560 {
1561 return false;
1562 }
1563
Mike Kelly46272802019-08-14 17:00:48 +01001564 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1565 }
1566 else
1567 {
1568 return Fail("%s: ProcessActivation failed", __func__);
1569 }
1570}
1571
1572template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001573 typename HalOperation = typename HalPolicy::Operation,
1574 typename HalModel = typename HalPolicy::Model>
1575bool ConvertArgMinMax(const HalOperation& operation,
1576 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001577 ConversionData& data,
1578 armnn::ArgMinMaxFunction argMinMaxFunction)
1579{
1580 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1581
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001582 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001583 using HalOperandType = typename HalPolicy::OperandType;
1584
1585 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1586
1587 if (!input0.IsValid())
1588 {
1589 return Fail("%s: Operation has invalid inputs", __func__);
1590 }
1591
1592 int32_t axis;
1593 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1594 {
1595 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1596 }
1597
1598 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1599 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1600
1601 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1602 {
1603 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1604 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1605 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1606 return Fail("%s: Axis must be in range [-n, n)", __func__);
1607 }
1608
1609 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1610 if (!output)
1611 {
1612 return Fail("%s: Could not read output 0", __func__);
1613 }
1614
1615 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1616
1617 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1618 if (IsDynamicTensor(outputInfo))
1619 {
1620 return Fail("%s: Dynamic output tensors are not supported", __func__);
1621 }
1622
1623 armnn::ArgMinMaxDescriptor descriptor;
1624 descriptor.m_Function = argMinMaxFunction;
1625 descriptor.m_Axis = axis;
1626
1627 bool isSupported = false;
1628 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1629 IsArgMinMaxSupported,
1630 data.m_Backends,
1631 isSupported,
1632 inputInfo0,
1633 outputInfo,
1634 descriptor);
1635 if (!isSupported)
1636 {
1637 return false;
1638 }
1639
1640 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1641 assert(layer != nullptr);
1642
1643 input0.Connect(layer->GetInputSlot(0));
1644
1645 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1646}
1647
// Converts an NNAPI CONCATENATION operation into an ArmNN Concat layer.
//
// Inputs 0..N-1 are the tensors to join; the final input is the concatenation
// axis as an INT32 scalar (negative values count back from the last dimension,
// TensorFlow-style). 1-D and 2-D inputs are temporarily expanded to 3-D, and a
// permutation (swizzle) is inserted when the concat axis is 2 for a 4-D tensor
// or 1 for a 3-D tensor; both transformations are undone on the output so the
// caller sees the originally requested shape.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was expanded to 3-D, and by how many dimensions,
    // so the concat axis and the output shape can be adjusted (and later undone).
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    // Convert every input tensor, expanding low-rank (1-D/2-D) operands to 3-D.
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
            CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
                armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to each input's TensorInfo for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        // The trailing reshape becomes the layer whose output is tracked for this operation.
        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1885
// Converts an NNAPI CONV_2D operation into an ArmNN Convolution2d layer
// (NHWC data layout, constant weights and bias, with any fused activation
// appended via ProcessActivation).
//
// Two operand layouts are accepted:
//  - 10 inputs: explicit padding (left/right/top/bottom), strides, activation.
//  -  7 inputs: implicit padding scheme, strides, activation; the padding is
//    computed by CalcPadding from the input and kernel spatial dimensions.
// Fails on malformed inputs, non-constant weights/bias, dynamic output
// shapes, or when no configured backend supports the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Explicit-padding variant: pads, strides and activation are scalar operands 3..9.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit-padding variant: a padding scheme plus strides and activation (operands 3..6).
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Spatial dimensions: kernel W/H from weights dims [2]/[1], input W/H from NHWC dims [2]/[1].
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when there is none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2005
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002006template<typename HalPolicy,
2007 typename HalOperation = typename HalPolicy::Operation,
2008 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002009bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2010{
2011 using HalOperand = typename HalPolicy::Operand;
2012 using HalOperandType = typename HalPolicy::OperandType;
2013
2014 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2015 if (!input.IsValid() )
2016 {
2017 return Fail("%s: Operation has invalid inputs", __func__);
2018 }
2019
2020 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2021 unsigned int rank = inputInfo.GetNumDimensions();
2022 if (rank != 4)
2023 {
2024 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2025 }
2026
2027 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2028 if (!output)
2029 {
2030 return Fail("%s: Could not read output 0", __func__);
2031 }
2032
2033 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2034 if (IsDynamicTensor(outputInfo))
2035 {
2036 return Fail("%s: Dynamic output tensors are not supported", __func__);
2037 }
2038
2039 armnn::DepthToSpaceDescriptor descriptor;
2040
2041 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2042 if (descriptor.m_BlockSize <= 1)
2043 {
2044 return Fail("%s: Block size must be at least 1 in all dimensions");
2045 }
2046
2047 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2048 if (Is12Operand(*output))
2049 {
2050 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2051 }
2052
2053 bool isSupported = false;
2054 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2055 IsDepthToSpaceSupported,
2056 data.m_Backends,
2057 isSupported,
2058 inputInfo,
2059 outputInfo,
2060 descriptor);
2061 if (!isSupported)
2062 {
2063 return false;
2064 }
2065
2066 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2067 assert(layer != nullptr);
2068 input.Connect(layer->GetInputSlot(0));
2069
2070 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2071}
2072
// Converts an NNAPI DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer (NHWC data layout, constant weights and bias).
//
// The Android weights operand has shape [ 1, H, W, I * M ]; it is reinterpreted
// as [ H, W, I, M ] (using the input's channel count I) and then swizzled to
// ArmNN's [ M, I, H, W ] layout before being pinned as a constant tensor.
//
// Two operand layouts are accepted:
//  - 11 inputs: explicit padding (left/right/top/bottom), strides, activation.
//  -  8 inputs: implicit padding scheme, strides, activation; the padding is
//    computed by CalcPadding from the input and kernel spatial dimensions.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the input's channel dimension; M = (I * M) / I).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    // Explicit-padding variant: pads, strides and (index 10) activation.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit-padding variant: a padding scheme plus strides and (index 7) activation.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel W/H come from the swizzled [ M, I, H, W ] weights (dims [3]/[2]);
        // input W/H from the NHWC input (dims [2]/[1]).
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when there is none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2217
Mike Kelly3c673942019-07-25 09:26:06 +01002218template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002219 typename HalOperation = typename HalPolicy::Operation,
2220 typename HalModel = typename HalPolicy::Model>
2221bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002222{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002223 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002224
2225 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2226 if (!input.IsValid())
2227 {
2228 return Fail("%s: Operation has invalid input", __func__);
2229 }
2230
Sadik Armagan98c0f662019-11-21 15:54:36 +00002231 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2232 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2233 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2234 {
2235 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2236 }
2237
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002238 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002239 if (!outputOperand)
2240 {
2241 return Fail("%s: Operation has invalid outputs", __func__);
2242 }
2243
2244 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2245 if (IsDynamicTensor(outputInfo))
2246 {
2247 return Fail("%s: Dynamic output tensors are not supported", __func__);
2248 }
2249
2250 bool isSupported = false;
2251 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2252 IsDequantizeSupported,
2253 data.m_Backends,
2254 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002255 inputInfo,
2256 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002257 if (!isSupported)
2258 {
2259 return false;
2260 }
2261
2262 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2263 assert(layer != nullptr);
2264 input.Connect(layer->GetInputSlot(0));
2265
2266 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2267}
2268
/// Converts an ANEURALNETWORKS_DIV operation into an ArmNN Division layer,
/// optionally followed by a fused activation layer.
///
/// Inputs:  0 - first operand, 1 - second operand, 2 - fused ActivationFn (optional).
/// Output:  0 - result tensor (dynamic shapes are rejected).
/// @return true on success; false after reporting the reason via Fail().
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the selected backends whether they can execute this Division before
    // mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDivisionSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    // ProcessActivation appends the fused activation (if any) and returns the layer
    // whose output slot should be tracked; nullptr indicates failure.
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        // Connects both inputs to startLayer, inserting reshapes for broadcasting
        // where required; can fail if the reshape itself is unsupported.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    return Fail("%s: ProcessActivation failed", __func__);
}
2332
2333template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002334 typename HalOperation = typename HalPolicy::Operation,
2335 typename HalModel = typename HalPolicy::Model>
2336bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002337{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002338 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002339
2340 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2341 if (!input.IsValid())
2342 {
2343 return Fail("%s: Operation has invalid inputs", __func__);
2344 }
2345
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002346 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002347 if (!outputOperand)
2348 {
2349 return Fail("%s: Operation has invalid outputs", __func__);
2350 }
2351
2352 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2353 if (IsDynamicTensor(outputInfo))
2354 {
2355 return Fail("%s: Dynamic output tensors are not supported", __func__);
2356 }
2357
2358 bool isSupported = false;
2359 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2360 IsFloorSupported,
2361 data.m_Backends,
2362 isSupported,
2363 input.GetTensorInfo(),
2364 outputInfo);
2365 if (!isSupported)
2366 {
2367 return false;
2368 }
2369
2370 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2371 assert(layer != nullptr);
2372 input.Connect(layer->GetInputSlot(0));
2373
2374 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2375}
2376
// HAL v1.0 has no TENSOR_QUANT8_SYMM operand type, so a v1.0 operand can never
// be QSYMM8; this overload therefore always returns false.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2381
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true if the given HAL v1.2 operand holds 8-bit symmetric quantized data.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2390
// Outcome of DequantizeIfRequired().
enum class DequantizeStatus
{
    SUCCESS,         // Weights were dequantized into a freshly allocated float buffer.
    NOT_REQUIRED,    // Weights are already constant, or no producing DEQUANTIZE op was found.
    INVALID_OPERAND  // The weights operand could not be read from the model.
};

// (buffer of dequantized floats, buffer size in bytes, tensor info describing it, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2399
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002400template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002401 typename HalOperation = typename HalPolicy::Operation,
2402 typename HalModel = typename HalPolicy::Model>
2403DequantizeResult DequantizeIfRequired(size_t operand_index,
2404 const HalOperation& operation,
2405 const HalModel& model,
2406 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002407{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002408 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002409
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002410 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002411 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002412 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002413 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002414 }
2415
2416 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2417 {
2418 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002419 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002420 }
2421
2422 const size_t weightsInputIndex = operation.inputs[operand_index];
2423
2424 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2425 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2426 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2427 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002428 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002429 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002430 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2431 {
2432 continue;
2433 }
2434
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002435 size_t outOpIndex = weightsInputIndex + 1;
2436 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002437 {
2438 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002439 }
2440
2441 if (outOpIndex != weightsInputIndex)
2442 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002443 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002444 }
2445
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002446 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002447 BOOST_ASSERT(operand);
2448
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002449 if (!IsQSymm8(*operand))
2450 {
2451 // Only supporting dequantize from QSYMM8 to FLOAT
2452 break;
2453 }
2454
2455 // Allocate a new buffer for the dequantized data and manually dequantize
2456 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2457 if (!startValue)
2458 {
2459 // Failed to get the operand address
2460 break;
2461 }
2462
2463 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2464 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002465 const float quantizationScale = operand->scale;
2466
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002467 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2468 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2469 {
2470 float* dstPtr = dequantizedBuffer.get();
2471 BOOST_ASSERT(dstPtr);
2472 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2473 }
2474
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002475 // Construct tensor info for dequantized ConstTensor
2476 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2477 operand->dimensions.data(),
2478 armnn::DataType::Float32);
2479
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002480 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2481 std::move(tensorInfo),
2482 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002483 }
2484
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002485 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002486}
2487
2488template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002489 typename HalOperation = typename HalPolicy::Operation,
2490 typename HalModel = typename HalPolicy::Model>
2491ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2492 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002493 const ConversionData& data,
2494 size_t operandIndex,
2495 bool optional = false)
2496{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002497 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2498
2499 DequantizeStatus status = std::get<3>(dequantized);
2500 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002501 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002502 case DequantizeStatus::INVALID_OPERAND:
2503 {
2504 // return invalid const tensor pin
2505 return ConstTensorPin();
2506 }
2507 case DequantizeStatus::NOT_REQUIRED:
2508 {
2509 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2510 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2511 }
2512 case DequantizeStatus::SUCCESS:
2513 default:
2514 {
2515 return ConstTensorPin(
2516 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2517 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002518 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002519}
2520
2521
/// Converts an ANEURALNETWORKS_FULLY_CONNECTED operation into an ArmNN
/// FullyConnected layer, optionally preceded by a Reshape (for inputs of rank
/// greater than 2) and followed by a fused activation.
///
/// Inputs:  0 - input tensor, 1 - weights (may come from a DEQUANTIZE op),
///          2 - 1D bias, 3 - fused ActivationFn.
/// @return true on success; false after reporting the reason via Fail().
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be the output of a DEQUANTIZE op; DequantizeAndMakeConstTensorPin
    // handles both the constant and the dequantize-produced cases.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to 2D as required by FullyConnected; this can throw
    // if the input/weights shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Higher-rank inputs are flattened through an explicit Reshape layer
            // placed between the input and the FullyConnected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2632
/// Converts an ANEURALNETWORKS_L2_NORMALIZATION operation into an ArmNN
/// L2Normalization layer (NHWC, rank-4 tensors only).
/// Any extra (optional) inputs beyond the single input tensor are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    // Only the mandatory input tensor is supported.
    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2691
/// Converts an ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION operation into an
/// ArmNN Normalization layer (across-channel LocalBrightness, NHWC, rank 4).
///
/// Inputs: 0 - input tensor, 1 - radius (INT32), 2 - bias k (FLOAT32),
///         3 - alpha (FLOAT32), 4 - beta (FLOAT32). Exactly 5 inputs required.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // Only the fixed 5-input form is supported; optional trailing inputs are rejected.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // NOTE(review): input.IsValid() was already checked above, so the first clause
    // here is redundant; kept as-is to preserve behavior exactly.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2769
2770template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002771 typename HalOperation = typename HalPolicy::Operation,
2772 typename HalModel = typename HalPolicy::Model>
2773bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002774{
Mike Kelly46272802019-08-14 17:00:48 +01002775 armnn::ActivationDescriptor desc;
2776 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2777
2778 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2779}
2780
/// Converts an ANEURALNETWORKS_MEAN operation into an ArmNN Mean layer.
///
/// Inputs: 0 - input tensor, 1 - 1D tensor of reduction axes (INT32),
///         2 - keep_dims flag (INT32, > 0 means retain reduced dimensions).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank wraps negative axis values into [0, rank); the std::set
    // both de-duplicates and sorts them.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2857
2858template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002859 typename HalOperation = typename HalPolicy::Operation,
2860 typename HalModel = typename HalPolicy::Model>
2861bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002862{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002863 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002864
2865 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2866 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2867
2868 if (!input0.IsValid() || !input1.IsValid())
2869 {
2870 return Fail("%s: Operation has invalid inputs", __func__);
2871 }
2872
2873 // The FuseActivation parameter is always the input index 2
2874 // and it should be optional
2875 ActivationFn activationFunction;
2876 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2877 {
2878 return Fail("%s: Operation has invalid inputs", __func__);
2879 }
2880
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002881 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002882
2883 if (outputOperand == nullptr)
2884 {
2885 return false;
2886 }
2887
2888 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2889 if (IsDynamicTensor(outputInfo))
2890 {
2891 return Fail("%s: Dynamic output tensors are not supported", __func__);
2892 }
2893
2894 bool isSupported = false;
2895 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2896 IsMultiplicationSupported,
2897 data.m_Backends,
2898 isSupported,
2899 input0.GetTensorInfo(),
2900 input1.GetTensorInfo(),
2901 outputInfo);
2902 if (!isSupported)
2903 {
2904 return false;
2905 }
2906
2907 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2908 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2909
2910 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2911 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2912
2913 if (endLayer != nullptr)
2914 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002915 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2916 if (!isReshapeSupported)
2917 {
2918 return false;
2919 }
2920
Mike Kelly46272802019-08-14 17:00:48 +01002921 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2922 }
2923 else
2924 {
2925 return Fail("%s: ProcessActivation failed", __func__);
2926 }
2927}
2928
2929template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002930 typename HalOperation = typename HalPolicy::Operation,
2931 typename HalModel = typename HalPolicy::Model>
2932bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002933{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002934 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002935
Mike Kelly3c673942019-07-25 09:26:06 +01002936 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2937 if (!input.IsValid())
2938 {
2939 return Fail("%s: Operation has invalid inputs", __func__);
2940 }
2941
2942 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2943 unsigned int rank = inputInfo.GetNumDimensions();
2944
2945 armnn::PadDescriptor descriptor;
2946 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2947 {
2948 return Fail("%s: Could not convert paddings", __func__);
2949 }
2950
2951 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2952 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2953 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2954 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2955 {
2956 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2957 }
2958
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002959 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002960 if (!output)
2961 {
2962 return Fail("%s: Could not read output", __func__);
2963 }
2964
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002965 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002966 if (IsDynamicTensor(outputInfo))
2967 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002968 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002969 }
2970
2971 bool isSupported = false;
2972 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2973 IsPadSupported,
2974 data.m_Backends,
2975 isSupported,
2976 inputInfo,
2977 outputInfo,
2978 descriptor);
2979 if (!isSupported)
2980 {
2981 return false;
2982 }
2983
2984 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2985 assert(layer != nullptr);
2986 input.Connect(layer->GetInputSlot(0));
2987 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2988
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002989 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002990}
2991
/// Converts an ANEURALNETWORKS_RESHAPE operation into an ArmNN Reshape layer.
///
/// Inputs: 0 - input tensor, 1 - 1D INT32 tensor of target dimensions
///         (may contain -1 wildcards, resolved by reshapePrepare()).
/// The resolved shape must match the declared output operand shape exactly.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3066
3067template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003068 typename HalOperation = typename HalPolicy::Operation,
3069 typename HalModel = typename HalPolicy::Model>
3070bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003071{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003072 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003073
Mike Kelly0a879362019-07-29 16:56:31 +01003074 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3075 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3076
3077 if (!input0.IsValid() || !input1.IsValid())
3078 {
3079 return Fail("%s: Operation has invalid inputs", __func__);
3080 }
3081
3082 // The FuseActivation parameter is always the input index 2
3083 // and it should be optional
3084 ActivationFn activationFunction;
3085 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3086 {
3087 return Fail("%s: Operation has invalid inputs", __func__);
3088 }
3089
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003090 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003091 if (!output)
3092 {
3093 return Fail("%s: Could not read output 0", __func__);
3094 }
3095
3096 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3097 if (IsDynamicTensor(outputInfo))
3098 {
3099 return Fail("%s: Dynamic output tensors are not supported", __func__);
3100 }
3101
3102 bool isSupported = false;
3103 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3104 IsSubtractionSupported,
3105 data.m_Backends,
3106 isSupported,
3107 input0.GetTensorInfo(),
3108 input1.GetTensorInfo(),
3109 outputInfo);
3110 if (!isSupported)
3111 {
3112 return false;
3113 }
3114
3115 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3116 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3117
3118 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3119 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3120
3121 if (endLayer)
3122 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01003123 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3124 if (!isReshapeSupported)
3125 {
3126 return false;
3127 }
Mike Kelly0a879362019-07-29 16:56:31 +01003128 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3129 }
3130
3131 return Fail("%s: ProcessActivation failed", __func__);
3132}
3133
Finn Williams23b87b32019-07-30 11:44:05 +01003134template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003135 typename HalOperation = typename HalPolicy::Operation,
3136 typename HalModel = typename HalPolicy::Model>
3137bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003138{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003139 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003140
3141 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3142 if (!input.IsValid())
3143 {
3144 return Fail("%s: Operation has invalid inputs", __func__);
3145 }
3146
3147 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3148 unsigned int rank = inputInfo.GetNumDimensions();
3149 if (rank > 4)
3150 {
3151 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3152 }
3153
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003154 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003155 if (!output)
3156 {
3157 return Fail("%s: Could not read output 0", __func__);
3158 }
3159
3160 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3161 {
3162 return Fail("%s: Dynamic output tensors are not supported", __func__);
3163 }
3164
3165 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3166 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003167 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003168
3169 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3170
3171 std::vector<int32_t> axis;
3172 if (!axisOperand)
3173 {
3174 axis.assign(dimensionSequence,
3175 dimensionSequence + rank);
3176 }
3177 else
3178 {
3179 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3180 }
3181
3182 std::vector<uint32_t> outputDims;
3183 for (unsigned int i = 0; i < rank; i++)
3184 {
3185 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3186 auto currentDimension = inputInfo.GetShape()[i];
3187 if (skipSqueeze || currentDimension != 1)
3188 {
3189 outputDims.push_back(currentDimension);
3190 }
3191 }
3192
3193 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3194
3195 armnn::TensorInfo outputInfo = inputInfo;
3196 outputInfo.SetShape(outShape);
3197
3198 armnn::ReshapeDescriptor reshapeDesc;
3199 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3200
3201 bool isSupported = false;
3202 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3203 IsReshapeSupported,
3204 data.m_Backends,
3205 isSupported,
3206 inputInfo,
3207 reshapeDesc);
3208 if (!isSupported)
3209 {
3210 return false;
3211 }
3212
3213 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3214 assert(layer != nullptr);
3215 input.Connect(layer->GetInputSlot(0));
3216
3217 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3218}
3219
3220template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003221 typename HalOperation = typename HalPolicy::Operation,
3222 typename HalModel = typename HalPolicy::Model>
3223bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003224{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003225 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003226
3227 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3228 if (!input.IsValid())
3229 {
3230 return Fail("%s: Operation has invalid inputs", __func__);
3231 }
3232
3233 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3234 unsigned int rank = inputInfo.GetNumDimensions();
3235 if (rank > 4)
3236 {
3237 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3238 }
3239
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003240 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003241 if (!output)
3242 {
3243 return Fail("%s: Could not read output 0", __func__);
3244 }
3245
3246 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3247 if (IsDynamicTensor(outputInfo))
3248 {
3249 return Fail("%s: Dynamic output tensors are not supported", __func__);
3250 }
3251
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003252 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3253 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3254 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003255
3256 std::vector<int32_t> beginValues;
3257 std::vector<int32_t> endValues;
3258 std::vector<int32_t> stridesValues;
3259
3260 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003261 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003262 {
3263 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3264 {
3265 return false;
3266 }
3267
3268 if (operandValues.size() != rank)
3269 {
3270 return false;
3271 }
3272
3273 return true;
3274 };
3275
3276 if (!ValidateInputOperands(*beginOperand, beginValues)
3277 || !ValidateInputOperands(*endOperand, endValues)
3278 || !ValidateInputOperands(*stridesOperand, stridesValues))
3279 {
3280 return Fail("%s: Operation has invalid input operand", __func__);
3281 }
3282
3283 // Stride cannot have value '0'
3284 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3285 {
3286 return Fail("%s: Stride must be non-zero value.", __func__);
3287 }
3288
3289 armnn::StridedSliceDescriptor descriptor;
3290 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3291 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3292 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3293 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3294
3295 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3296 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3297 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3298 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3299 {
3300 return Fail("%s: Operation has invalid inputs", __func__);
3301 }
3302
3303 bool isSupported = false;
3304 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3305 IsStridedSliceSupported,
3306 data.m_Backends,
3307 isSupported,
3308 inputInfo,
3309 outputInfo,
3310 descriptor);
3311 if (!isSupported)
3312 {
3313 return false;
3314 }
3315
3316 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3317 assert(layer != nullptr);
3318 input.Connect(layer->GetInputSlot(0));
3319
3320 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3321}
3322
3323template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003324 typename HalOperation = typename HalPolicy::Operation,
3325 typename HalModel = typename HalPolicy::Model>
3326bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003327{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003328 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003329
3330 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3331 if (!input.IsValid())
3332 {
3333 return Fail("%s: Operation has invalid inputs", __func__);
3334 }
3335
3336 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3337 unsigned int rank = inputInfo.GetNumDimensions();
3338 if (rank > 4)
3339 {
3340 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3341 }
3342
3343 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3344 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003345 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003346
3347 std::vector<int32_t> perm(rank);
3348 if (!permOperand)
3349 {
3350 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3351 for (unsigned int i = rank; i > 0; i--)
3352 {
3353 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3354 }
3355 }
3356 else
3357 {
3358 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3359 }
3360
3361 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3362
Mike Kelly46272802019-08-14 17:00:48 +01003363 armnn::PermuteDescriptor permuteDesc;
Matthew Benthamc4aacb32019-11-27 16:55:05 +00003364 permuteDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003365
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003366 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003367 if (!output)
3368 {
3369 return Fail("%s: Could not read output 0", __func__);
3370 }
3371
3372 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003373 if (IsDynamicTensor(outputInfo))
3374 {
3375 return Fail("%s: Dynamic output tensors are not supported", __func__);
3376 }
3377
Mike Kelly46272802019-08-14 17:00:48 +01003378
3379 bool isSupported = false;
3380 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3381 IsPermuteSupported,
3382 data.m_Backends,
3383 isSupported,
3384 inputInfo,
3385 outputInfo,
3386 permuteDesc);
3387 if (!isSupported)
3388 {
3389 return false;
3390 }
3391
3392 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3393 assert(layer != nullptr);
3394 input.Connect(layer->GetInputSlot(0));
3395
3396 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3397}
3398
3399template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003400 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003401 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003402 typename HalModel = typename HalPolicy::Model>
3403bool ConvertBatchToSpaceNd(const HalOperation& operation,
3404 const HalModel& model,
3405 ConversionData& data)
3406{
Finn Williams23b87b32019-07-30 11:44:05 +01003407
3408 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3409 if (!input.IsValid())
3410 {
3411 return Fail("%s: Operation has invalid inputs", __func__);
3412 }
3413
3414 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3415 if (!output)
3416 {
3417 return Fail("%s: Could not read output 0", __func__);
3418 }
3419
3420 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3421 if (IsDynamicTensor(outputInfo))
3422 {
3423 return Fail("%s: Dynamic output tensors are not supported", __func__);
3424 }
3425
3426 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3427 if (!blockOperand)
3428 {
3429 return Fail("%s: Could not read input 1", __func__);
3430 }
3431
3432 // Convert the block operand to int32
3433 std::vector<int32_t> block;
3434 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3435 {
3436 return Fail("%s: Input 1 has invalid values", __func__);
3437 }
3438
3439 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3440
3441 unsigned int rank = inputInfo.GetNumDimensions();
3442 if (rank != 4)
3443 {
3444 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3445 }
3446
3447 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3448 {
3449 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3450 " greater than or equal to 1", __func__);
3451 }
3452
3453 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3454 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3455 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3456
3457 if (Is12Operand(*output))
3458 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003459 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003460 }
3461 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3462 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3463
3464 bool isSupported = false;
3465 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3466 IsBatchToSpaceNdSupported,
3467 data.m_Backends,
3468 isSupported,
3469 inputInfo,
3470 outputInfo,
3471 batchToSpaceNdDesc);
3472 if (!isSupported)
3473 {
3474 return false;
3475 }
3476
3477 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3478 assert(layer != nullptr);
3479 input.Connect(layer->GetInputSlot(0));
3480
3481 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3482}
Mike Kelly0a879362019-07-29 16:56:31 +01003483
Finn Williamsd74c5052019-07-30 17:06:00 +01003484template<typename HalPolicy,
3485 typename HalOperation = typename HalPolicy::Operation,
3486 typename HalOperand = typename HalPolicy::Operand,
3487 typename HalModel = typename HalPolicy::Model>
3488bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3489{
3490 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3491 if (!input.IsValid())
3492 {
3493 return Fail("%s: Operation has invalid inputs", __func__);
3494 }
3495
3496 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3497 unsigned int rank = inputInfo.GetNumDimensions();
3498 unsigned int spatialDim = rank - 2;
3499
3500 if (rank != 4)
3501 {
3502 Fail("%s: Only inputs with rank 4 are supported", __func__);
3503 }
3504
3505 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3506 if (!output)
3507 {
3508 return Fail("%s: Could not read output 0", __func__);
3509 }
3510
3511 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3512 if (IsDynamicTensor(outputInfo))
3513 {
3514 return Fail("%s: Dynamic output tensors are not supported", __func__);
3515 }
3516
3517 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3518 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3519
3520 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3521 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3522 {
3523 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3524 }
3525
3526 std::vector<int32_t> blockShape;
3527 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3528 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3529 {
3530 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3531 }
3532
3533 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3534 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3535 {
3536 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3537 }
3538
3539 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3540 std::vector<int32_t> paddings;
3541 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3542 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3543 {
3544 int paddingBeforeInput = paddings[i];
3545 int paddingAfterInput = paddings[i + 1];
3546 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3547 {
3548 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3549 }
3550
3551 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3552 }
3553
3554 armnn::SpaceToBatchNdDescriptor descriptor;
3555 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3556 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3557 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3558
3559 if (Is12Operand(*output))
3560 {
3561 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3562 }
3563
3564 bool isSupported = false;
3565 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3566 IsSpaceToBatchNdSupported,
3567 data.m_Backends,
3568 isSupported,
3569 inputInfo,
3570 outputInfo,
3571 descriptor);
3572 if (!isSupported)
3573 {
3574 return false;
3575 }
3576
3577 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3578 assert(layer != nullptr);
3579 input.Connect(layer->GetInputSlot(0));
3580
3581 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3582}
3583
Kevin May407718f2019-09-09 14:46:41 +01003584template<typename HalPolicy,
3585 typename HalOperation = typename HalPolicy::Operation,
3586 typename HalModel = typename HalPolicy::Model>
3587bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3588{
3589 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3590
3591 if (!input.IsValid())
3592 {
3593 return Fail("%s: Operation has invalid input", __func__);
3594 }
3595
3596 using HalOperand = typename HalPolicy::Operand;
3597 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3598 if (!output)
3599 {
3600 return Fail("%s: Could not read output 0", __func__);
3601 }
3602
3603 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3604 if (IsDynamicTensor(outputInfo))
3605 {
3606 return Fail("%s: Dynamic output tensors are not supported", __func__);
3607 }
3608
3609 bool isSupported = false;
3610 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3611 IsAbsSupported,
3612 data.m_Backends,
3613 isSupported,
3614 input.GetTensorInfo(),
3615 outputInfo);
3616
3617 if (!isSupported)
3618 {
3619 return false;
3620 }
3621
3622 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3623 assert(layer != nullptr);
3624 input.Connect(layer->GetInputSlot(0));
3625
3626 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3627}
3628
3629
saoste01b8471482018-10-10 09:44:51 +01003630} // namespace armnn_driver