blob: 3a144f782ff61dab9a69f8999cc415a2c5b9203f [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Aggregates the state shared across all operation conversions for one model:
// the candidate backends, the ArmNN network being built, and the operand bookkeeping.
struct ConversionData
{
    // @param backends Backend ids to check layer support against, in preference order.
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr) // INetworkPtr starts empty (pointer + deleter both null)
    {}

    // Backends available for this conversion, in preference order.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Output slot that produces each operand's value (presumably indexed by operand
    // index — TODO confirm against the code that fills this in).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing the model's constant data; must outlive tensors referencing them.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Lightweight handle pairing a layer's output slot with the tensor info flowing
// through it; represents a (possibly invalid) input connection during conversion.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // Whether this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Tensor info of the value carried by the wrapped slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Wraps an armnn::ConstTensor together with (optionally) the storage backing it.
// A pin can be invalid (error state), optional-and-absent, or valid.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied when the data needs swizzling into ArmNN order.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is forbidden (would duplicate/alias the owned swizzled storage); moving is allowed.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string; 'args' must match its specifiers
//                  (they are forwarded straight into ALOGD's varargs).
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Tries each backend in order; sets 'supported' to true and stops at the first backend that
// accepts the layer, logging the backend-provided reason whenever one rejects it.
// An InvalidArgumentException from the support query is rethrown with location context.
// (Comments cannot appear inside the macro body: '//' would swallow the continuation backslash.)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Mike Kellyb5fdf382019-06-11 16:35:25 +0100169template<typename Operand>
170armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 additionally allows BOOL, FLOAT16 and the symmetric quantized tensor types.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
196
// V1_0 operands can never be BOOL: the type only exists from HAL 1.2 onwards.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// Checks if an operand is a HAL 1.2 operand; trivially false for this V1_0 overload.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
206
#ifdef ARMNN_ANDROID_NN_V1_2

// BOOL is a valid operand type from HAL 1.2 onwards.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand; trivially true for this V1_2 overload.
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
221
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100222template<typename LayerHandleType>
223armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
224 armnn::TensorInfo reshapeInfo)
225{
226 armnn::ReshapeDescriptor reshapeDescriptor;
227 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
228
229 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
230 BOOST_ASSERT(reshapeLayer != nullptr);
231
232 // Attach the input layer to the reshape layer
233 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
234 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
235
236 return *reshapeLayer;
237}
238
// Connects 'input0' and 'input1' to the two input slots of 'startLayer', inserting a
// Reshape in front of the lower-rank input (prepending size-1 dimensions) so both inputs
// have the same rank, as required for broadcasting.
// @return false if the needed reshape is not supported by any backend; true otherwise.
bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller shape into the tail of an all-ones shape of the larger rank
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
329
330void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
331 android::nn::PaddingScheme scheme)
332{
333 int32_t padHead;
334 int32_t padTail;
335 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
336 outPadHead = boost::numeric_cast<uint32_t>(padHead);
337 outPadTail = boost::numeric_cast<uint32_t>(padTail);
338}
339
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 variant: computes explicit padding for one dimension, taking kernel dilation into account.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Explicit padding for transposed convolution; outputs stay signed
// (transpose-conv padding can legitimately be negative).
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
359
Matthew Bentham912b3622019-05-03 15:49:14 +0100360Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100361{
362 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100363 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100364 shape.dimensions = operand.dimensions;
365 shape.scale = operand.scale;
366 shape.offset = operand.zeroPoint;
367 return shape;
368}
369
#ifdef ARMNN_ANDROID_NN_V1_2

// Converts a V1_2 HAL operand's metadata into an android::nn::Shape.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
383
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        // Start from the per-axis weight scales and multiply each by the input scale
        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only silently correct the scale when it is within 1% of the expected value;
            // larger mismatches are left untouched so ArmNN will reject them later.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
423
// 4D Tensor Permutations (mappings between Android's NHWC layout and ArmNN's layout)
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (used when rearranging tensors for concatenation)
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
434
435template<typename OSlot>
436armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
437 const armnn::PermutationVector& mappings)
438{
439 // Add swizzle layer
440 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
441
442 BOOST_ASSERT(layer != nullptr);
443
444 // Connect input to swizzle layer
445 input.Connect(layer->GetInputSlot(0));
446
447 // Setup swizzled output
448 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
449 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
450
451 return *layer;
452}
453
454void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
455{
456 // Add swizzle layer
457 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
458 // Connect swizzled input to layer
459 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
460}
461
462armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
463{
464 // Add deswizzle layer
465 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
466 return deswizzleLayer;
467}
468
469// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
470armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
471 LayerInputHandle& input,
472 armnn::IConnectableLayer& firstLayer,
473 armnn::IConnectableLayer& lastLayer)
474{
475 SwizzleIn(network, input, firstLayer, 0);
476 return DeswizzleOut(network, lastLayer, 0);
477}
478
479// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
480armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
481 armnn::IConnectableLayer& layer)
482{
483 return SwizzleInDeswizzleOut(network, input, layer, layer);
484}
485
486bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
487 const armnn::TensorShape & outputShape,
488 uint32_t concatDim)
489{
490 // Validate the output shape is correct given the input shapes (which have just been validated)
491 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
492 if (outputShape.GetNumDimensions() != numDimensions)
493 {
494 return Fail("%s: Output shape has wrong number of dimensions", __func__);
495 }
496
497 unsigned int outputSizeAlongConcatenatedDimension = 0;
498 for (unsigned int i = 0; i < inputShapes.size(); i++)
499 {
500 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
501 }
502
503 for (unsigned int i = 0; i < numDimensions; ++i)
504 {
505 if (i == concatDim)
506 {
507 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
508 {
509 return Fail(
510 "%s: Invalid output shape for dimension %d (%d != %d)",
511 __func__,
512 i,
513 outputShape[i],
514 outputSizeAlongConcatenatedDimension);
515 }
516 }
517 else
518 {
519 if (outputShape[i] != inputShapes[0][i])
520 {
521 return Fail("%s: Invalid output shape", __func__);
522 }
523 }
524 }
525
526 return true;
527}
528
529bool RequiresReshape(armnn::TensorShape & inputShape)
530{
531 return inputShape.GetNumDimensions() < 3;
532}
533
arovir01b0717b52018-09-05 17:03:25 +0100534void SwizzleInputs(armnn::INetwork& network,
535 std::vector<LayerInputHandle>& inputs,
536 std::vector<armnn::TensorShape>& inputShapes,
537 const armnn::PermutationVector& mapping)
538{
539 if (!mapping.IsEqual(IdentityPermutation4D))
540 {
541 size_t nInputs = inputs.size();
542 for (size_t i=0; i<nInputs; ++i)
543 {
544 // add swizzle layer
545 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
546 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
547 auto& outputInfo = outputSlot.GetTensorInfo();
548 // replace inputs with the swizzled ones
549 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
550 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
551 }
552 }
553}
554
// Verifies that the permute implied by 'mapping' is supported by some backend for every
// input, then applies it via SwizzleInputs. No-op (returns true) for the identity mapping.
// @return false as soon as one input's permute is unsupported; inputs are only mutated
//         after all checks pass.
bool CheckReshapeSupported(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping,
                           const armnn::TensorInfo& outputInfo)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::PermuteDescriptor permuteDesc;
            permuteDesc.m_DimMappings = mapping;

            // NOTE(review): every input is checked against the same 'outputInfo' rather than
            // the permuted form of that input's own info — confirm this is intentional.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsPermuteSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputInfo,
                                       permuteDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
588
589
// Decides whether a concatenation needs its tensors permuted so Compute Library
// subtensors can be used, rewriting 'concatDimension' and filling 'permutationPair'
// with the (forward, reverse) permutations to apply.
// @return true if a permute is required, false if concatenation can proceed as-is.
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        // Swap dims 1 and 2 so the concat happens along dimension 1 (self-inverse permutation)
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        // Rotate left so the concat happens along dimension 0; rotate right restores the layout
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
614
615} // anonymous namespace
616
namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver
632
633///
634/// Utility templates
635///
636
637namespace armnn_driver
638{
639
640using namespace android::nn;
641
// Retrieves the operand feeding input 'inputIndex' of 'operation', or nullptr if the
// index is out of range.
// @param failOnIndexOutOfBounds When true, an invalid index is logged as a failure;
//        when false, nullptr is returned silently (used for optional trailing inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // Cast to int: passing uint32_t/size_t through the "%i" varargs specifier is
            // undefined behaviour (size_t is 64-bit on LP64 targets)
            Fail("%s: invalid input index: %i out of %i",
                 __func__, static_cast<int>(inputIndex), static_cast<int>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
663
// Retrieves the operand produced as output 'outputIndex' of 'operation', or nullptr
// (with a logged failure) if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // Cast to int: passing uint32_t/size_t through the "%i" varargs specifier is
        // undefined behaviour (size_t is 64-bit on LP64 targets)
        Fail("%s: invalid output index: %i out of %i",
             __func__, static_cast<int>(outputIndex), static_cast<int>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
683
// Resolves the read-only address of a constant operand's data, based on its lifetime:
// CONSTANT_COPY data lives in model.operandValues; CONSTANT_REFERENCE data lives in one
// of the driver's memory pools. Returns nullptr (optionally without logging a failure)
// when no value is available.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value is an error: fall into the failure path
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
730
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100731template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100732 typename HalOperation = typename HalPolicy::Operation,
733 typename HalModel = typename HalPolicy::Model,
734 typename HalOperandType = typename HalPolicy::OperandType>
735bool GetOperandType(const HalOperation& operation,
736 uint32_t inputIndex,
737 const HalModel& model,
738 HalOperandType& type)
739{
740 using HalOperand = typename HalPolicy::Operand;
741
742 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
743 if (!operand)
744 {
745 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
746 }
747
748 type = operand->type;
749 return true;
750}
751
// Returns true when the operand's data is fixed at conversion time: a constant copy,
// a constant memory reference, or an absent optional value (NO_VALUE).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
764
// Converts a constant HAL operand into a ConstTensorPin the ArmNN network can reference.
// @param dimensionMappings    Permutation applied to the tensor data (and per-axis quant dim).
// @param overrideTensorShape  If non-null, replaces the shape derived from the operand.
// @param optional             When true, a missing value yields an invalid-but-optional pin
//                             instead of an error.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // A non-optional operand must have constant lifetime to be converted here
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        // Remap the quantization dimension through the same permutation applied to the data
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
813
814template<typename HalPolicy,
815 typename HalOperation = typename HalPolicy::Operation,
816 typename HalModel = typename HalPolicy::Model>
817ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
818 uint32_t inputIndex,
819 const HalModel& model,
820 const ConversionData& data,
821 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
822 const armnn::TensorShape* overrideTensorShape = nullptr,
823 bool optional = false)
824{
825 using HalOperand = typename HalPolicy::Operand;
826
827 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
828 if (!operand)
829 {
830 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
831 return ConstTensorPin();
832 }
833 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
834 model,
835 data,
836 dimensionMappings,
837 overrideTensorShape,
838 optional);
839}
840
841template<typename HalPolicy,
842 typename OutputType,
843 typename HalOperandType = typename HalPolicy::OperandType,
844 typename HalOperation = typename HalPolicy::Operation,
845 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100846bool GetInputScalar(const HalOperation& operation,
847 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100848 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100849 OutputType& outValue,
850 const HalModel& model,
851 const ConversionData& data)
852{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100853 using HalOperand = typename HalPolicy::Operand;
854
855 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100856 if (!operand)
857 {
858 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
859 }
860
861 if (operand->type != type)
862 {
863 return Fail("%s: unexpected operand type: %s (should be %s)",
864 __func__, toString(operand->type).c_str(), toString(type).c_str());
865 }
866
867 if (operand->location.length != sizeof(OutputType))
868 {
869 return Fail("%s: incorrect operand location length: %i (should be %i)",
870 __func__, operand->location.length, sizeof(OutputType));
871 }
872
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100873 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100874 if (!valueAddress)
875 {
876 return Fail("%s: failed to get address for operand", __func__);
877 }
878
879 outValue = *(static_cast<const OutputType*>(valueAddress));
880 return true;
881}
882
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100883template<typename HalPolicy,
884 typename HalOperation = typename HalPolicy::Operation,
885 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100886bool GetInputInt32(const HalOperation& operation,
887 uint32_t inputIndex,
888 int32_t& outValue,
889 const HalModel& model,
890 const ConversionData& data)
891{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100892 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100893}
894
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100895template<typename HalPolicy,
896 typename HalOperation = typename HalPolicy::Operation,
897 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100898bool GetInputFloat32(const HalOperation& operation,
899 uint32_t inputIndex,
900 float& outValue,
901 const HalModel& model,
902 const ConversionData& data)
903{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100904 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100905}
906
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100907template<typename HalPolicy,
908 typename HalOperation = typename HalPolicy::Operation,
909 typename HalOperandType = typename HalPolicy::OperandType,
910 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100911bool GetInputActivationFunctionImpl(const HalOperation& operation,
912 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100913 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100914 ActivationFn& outActivationFunction,
915 const HalModel& model,
916 const ConversionData& data)
917{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100918 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100919 {
920 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
921 __func__,
922 toString(type).c_str(),
923 toString(OperandType::INT32).c_str(),
924 toString(OperandType::TENSOR_INT32).c_str());
925 }
926
927 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100928 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100929 {
930 return Fail("%s: failed to get activation input value", __func__);
931 }
932 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
933 return true;
934}
935
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100936template<typename HalPolicy,
937 typename HalOperation = typename HalPolicy::Operation,
938 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100939bool GetInputActivationFunction(const HalOperation& operation,
940 uint32_t inputIndex,
941 ActivationFn& outActivationFunction,
942 const HalModel& model,
943 const ConversionData& data)
944{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100945 return GetInputActivationFunctionImpl<HalPolicy>(operation,
946 inputIndex,
947 HalPolicy::OperandType::INT32,
948 outActivationFunction,
949 model,
950 data);
arovir01b0717b52018-09-05 17:03:25 +0100951}
952
// Reads an activation function that the model supplies as a tensor input.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the comment above, this forwards OperandType::INT32
    // (not TENSOR_INT32), making it behave identically to
    // GetInputActivationFunction; a TENSOR_INT32 operand would be rejected by
    // GetInputScalar's type check. Confirm whether TENSOR_INT32 was intended.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
970
971
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100972template<typename HalPolicy,
973 typename HalOperation = typename HalPolicy::Operation,
974 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100975bool GetOptionalInputActivation(const HalOperation& operation,
976 uint32_t inputIndex,
977 ActivationFn& activationFunction,
978 const HalModel& model,
979 const ConversionData& data)
980{
981 if (operation.inputs.size() <= inputIndex)
982 {
983 activationFunction = ActivationFn::kActivationNone;
984 }
985 else
986 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100987 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100988 {
989 return Fail("%s: Operation has invalid inputs", __func__);
990 }
991 }
992 return true;
993}
994
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100995template<typename HalPolicy,
996 typename ConvolutionDescriptor,
997 typename HalOperation = typename HalPolicy::Operation,
998 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100999bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1000 uint32_t dilationXIndex,
1001 ConvolutionDescriptor& descriptor,
1002 const HalModel& model,
1003 const ConversionData& data)
1004{
1005 bool success = true;
1006 if (operation.inputs.size() >= dilationXIndex + 2)
1007 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001008 success &= GetInputScalar<HalPolicy>(operation,
1009 dilationXIndex,
1010 HalPolicy::OperandType::INT32,
1011 descriptor.m_DilationX,
1012 model,
1013 data);
1014 success &= GetInputScalar<HalPolicy>(operation,
1015 dilationXIndex + 1,
1016 HalPolicy::OperandType::INT32,
1017 descriptor.m_DilationY,
1018 model,
1019 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001020 }
1021
1022 return success;
1023}
1024
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001025template<typename HalPolicy,
1026 typename HalOperand = typename HalPolicy::Operand,
1027 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001028bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001029 std::vector<int32_t>& outValues,
1030 const HalModel& model,
1031 const ConversionData& data)
1032{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001033 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001034 {
1035 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1036 }
1037
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001038 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001039 if (!startAddress)
1040 {
1041 return Fail("%s: failed to get operand address", __func__, operand.type);
1042 }
1043
1044 // Check number of bytes is sensible
1045 const uint32_t numBytes = operand.location.length;
1046 if (numBytes % sizeof(int32_t) != 0)
1047 {
1048 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1049 __func__, numBytes, sizeof(int32_t));
1050 }
1051
1052 outValues.resize(numBytes / sizeof(int32_t));
1053 memcpy(outValues.data(), startAddress, numBytes);
1054 return true;
1055}
1056
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001057template<typename HalPolicy,
1058 typename HalOperation = typename HalPolicy::Operation,
1059 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001060bool GetInputPaddingScheme(const HalOperation& operation,
1061 uint32_t inputIndex,
1062 PaddingScheme& outPaddingScheme,
1063 const HalModel& model,
1064 const ConversionData& data)
1065{
1066 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001067 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001068 {
1069 return Fail("%s: failed to get padding scheme input value", __func__);
1070 }
1071
1072 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1073 return true;
1074}
1075
// Resolves input operand 'inputIndex' of 'operation' into a LayerInputHandle
// that can be connected to an ArmNN layer.
//
// Behavior depends on the operand's lifetime:
//  - MODEL_INPUT: checked against backend support, then (falling through) bound
//    to the output slot previously recorded in data.m_OutputSlotForOperand;
//  - TEMPORARY_VARIABLE / MODEL_OUTPUT: bound to the recorded output slot;
//  - CONSTANT_COPY / CONSTANT_REFERENCE: materialized as an ArmNN Constant layer;
//  - anything else: failure.
// Returns an invalid (default-constructed) handle on any failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1188
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001189template<typename HalPolicy,
1190 typename HalOperation = typename HalPolicy::Operation,
1191 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001192bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1193 uint32_t operationOutputIndex,
1194 armnn::IConnectableLayer& layer,
1195 uint32_t layerOutputIndex,
1196 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001197 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001198{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001199 using HalOperand = typename HalPolicy::Operand;
1200
1201 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001202 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1203 {
1204 return false;
1205 }
1206
1207 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1208
1209 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1210 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1211
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001212 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001213
1214 return true;
1215}
1216
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001217template<typename HalPolicy,
1218 typename HalOperation = typename HalPolicy::Operation,
1219 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001220armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1221 uint32_t inputIndex,
1222 const HalModel& model,
1223 ConversionData& data)
1224{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001225 using HalOperand = typename HalPolicy::Operand;
1226
1227 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001228 if (!operand)
1229 {
1230 return armnn::DataLayout::NHWC;
1231 }
1232
1233 if (!IsBool(*operand))
1234 {
1235 return armnn::DataLayout::NHWC;
1236 }
1237
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001238 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001239 if (!valueAddress)
1240 {
1241 return armnn::DataLayout::NHWC;
1242 }
1243
1244 if (*(static_cast<const bool*>(valueAddress)))
1245 {
1246 return armnn::DataLayout::NCHW;
1247 }
1248 else
1249 {
1250 return armnn::DataLayout::NHWC;
1251 }
1252}
1253
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001254template<typename HalPolicy,
1255 typename HalOperation = typename HalPolicy::Operation,
1256 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001257bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1258 uint32_t outputIndex,
1259 armnn::IConnectableLayer& layer,
1260 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001261 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001262{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001263 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1264 outputIndex,
1265 layer,
1266 outputIndex,
1267 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001268 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001269}
1270
// Shared implementation for the HAL activation operations (RELU, RELU1, RELU6,
// TANH, ...): reads input 0, validates the (static) output, checks backend
// support, then adds an ActivationLayer configured by 'activationDesc'.
//
// @param operationName  Name used in failure messages for the calling operation.
// @return true on success; false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the backends whether this activation is supported before adding the layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1319
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001320template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001321 typename HalOperation = typename HalPolicy::Operation,
1322 typename HalModel = typename HalPolicy::Model>
1323bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1324{
1325 armnn::ActivationDescriptor desc;
1326 desc.m_Function = armnn::ActivationFunction::ReLu;
1327
1328 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1329}
1330
1331template<typename HalPolicy,
1332 typename HalOperation = typename HalPolicy::Operation,
1333 typename HalModel = typename HalPolicy::Model>
1334bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1335{
1336 armnn::ActivationDescriptor desc;
1337 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1338 desc.m_A = 1.0f;
1339 desc.m_B = -1.0f;
1340
1341 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1342}
1343
1344template<typename HalPolicy,
1345 typename HalOperation = typename HalPolicy::Operation,
1346 typename HalModel = typename HalPolicy::Model>
1347bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1348{
1349 armnn::ActivationDescriptor desc;
1350 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1351 desc.m_A = 6.0f;
1352
1353 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1354}
1355
1356template<typename HalPolicy,
1357 typename HalOperation = typename HalPolicy::Operation,
1358 typename HalModel = typename HalPolicy::Model>
1359bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1360{
1361 armnn::ActivationDescriptor desc;
1362 desc.m_Function = armnn::ActivationFunction::TanH;
1363 desc.m_A = 1.0f; // android nn does not support tanH parameters
1364 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1365
1366 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1367}
1368
1369template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001370 typename HalOperation = typename HalPolicy::Operation,
1371 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001372bool ConvertPaddings(const HalOperation& operation,
1373 const HalModel& model,
1374 ConversionData& data,
1375 unsigned int rank,
1376 armnn::PadDescriptor& padDescriptor)
1377{
1378 using HalOperand = typename HalPolicy::Operand;
1379
1380 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1381 if (!paddingsOperand)
1382 {
1383 return Fail("%s: Could not read paddings operand", __func__);
1384 }
1385
1386 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1387 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1388 {
1389 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1390 }
1391
1392 std::vector<int32_t> paddings;
1393 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1394
1395 // add padding for each dimension of input tensor.
1396 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1397 {
1398 int paddingBeforeInput = paddings[i];
1399 int paddingAfterInput = paddings[i + 1];
1400
1401 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1402 {
1403 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1404 }
1405
1406 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1407 }
1408
1409 return true;
1410}
1411
// Shared implementation for the HAL pooling operations (MAX_POOL_2D,
// AVERAGE_POOL_2D, L2_POOL_2D). Supports both NNAPI signatures:
//  - explicit padding: 10+ inputs (pad l/r/t/b, strides, pool size, activation,
//    optional data layout at index 10);
//  - implicit padding: 7+ inputs (padding scheme, strides, pool size,
//    activation, optional data layout at index 7) — padding is then computed
//    from the input dimensions via CalcPadding.
//
// @param operationName  Name used in failure messages for the calling operation.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 models may append an optional data-layout (NCHW) flag.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Implicit padding: derive pad amounts from the (layout-dependent)
        // input width/height after the data layout is known.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1530
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001531template<typename HalPolicy,
Mike Kellyb8805202019-07-31 17:25:43 +01001532 typename Operation = typename HalPolicy::Operation,
1533 typename Model = typename HalPolicy::Model>
Mike Kelly46272802019-08-14 17:00:48 +01001534bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
1535{
1536 using Operand = typename HalPolicy::Operand;
1537
1538 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1539 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1540
1541 if (!input0.IsValid() || !input1.IsValid())
1542 {
1543 return Fail("%s: Operation has invalid inputs", __func__);
1544 }
1545
1546 // The FuseActivation parameter is always the input index 2
1547 // and it should be optional
1548 ActivationFn activationFunction;
1549 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1550 {
1551 return Fail("%s: Operation has invalid inputs", __func__);
1552 }
1553
1554 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1555 if (!outputOperand)
1556 {
1557 return false;
1558 }
1559
1560 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1561 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1562
1563 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1564 if (IsDynamicTensor(outputInfo))
1565 {
1566 return Fail("%s: Dynamic output tensors are not supported", __func__);
1567 }
1568
1569 bool isSupported = false;
1570 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1571 IsAdditionSupported,
1572 data.m_Backends,
1573 isSupported,
1574 inputInfo0,
1575 inputInfo1,
1576 outputInfo);
1577 if (!isSupported)
1578 {
1579 return false;
1580 }
1581
1582 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1583 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1584
1585 if (endLayer != nullptr)
1586 {
Derek Lamberti2c90f752019-12-19 15:45:35 +00001587 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001588 if (!isReshapeSupported)
1589 {
1590 return false;
1591 }
1592
Mike Kelly46272802019-08-14 17:00:48 +01001593 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1594 }
1595 else
1596 {
1597 return Fail("%s: ProcessActivation failed", __func__);
1598 }
1599}
1600
1601template<typename HalPolicy,
1602 typename Operation = typename HalPolicy::Operation,
1603 typename Model = typename HalPolicy::Model>
Francis Murtagha23334e2019-11-19 12:06:47 +00001604bool ConvertArgMinMax(const Operation& operation,
1605 const Model& model,
1606 ConversionData& data,
1607 armnn::ArgMinMaxFunction argMinMaxFunction)
1608{
1609 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1610
1611 using HalOperand = typename HalPolicy::Operand;
1612 using HalOperandType = typename HalPolicy::OperandType;
1613
1614 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1615
1616 if (!input0.IsValid())
1617 {
1618 return Fail("%s: Operation has invalid inputs", __func__);
1619 }
1620
1621 int32_t axis;
1622 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1623 {
1624 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1625 }
1626
1627 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1628 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1629
1630 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1631 {
1632 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1633 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1634 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1635 return Fail("%s: Axis must be in range [-n, n)", __func__);
1636 }
1637
1638 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1639 if (!output)
1640 {
1641 return Fail("%s: Could not read output 0", __func__);
1642 }
1643
1644 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1645
1646 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1647 if (IsDynamicTensor(outputInfo))
1648 {
1649 return Fail("%s: Dynamic output tensors are not supported", __func__);
1650 }
1651
1652 armnn::ArgMinMaxDescriptor descriptor;
1653 descriptor.m_Function = argMinMaxFunction;
1654 descriptor.m_Axis = axis;
1655
1656 bool isSupported = false;
1657 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1658 IsArgMinMaxSupported,
1659 data.m_Backends,
1660 isSupported,
1661 inputInfo0,
1662 outputInfo,
1663 descriptor);
1664 if (!isSupported)
1665 {
1666 return false;
1667 }
1668
1669 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1670 assert(layer != nullptr);
1671
1672 input0.Connect(layer->GetInputSlot(0));
1673
1674 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1675}
1676
// Converts an NNAPI CONCATENATION operation into an ArmNN Concat layer,
// inserting Reshape and Permute layers as needed so the concat can be
// expressed along a dimension ArmNN supports.
// Inputs 0..N-1 are the tensors to join; input N is the concat axis scalar.
// Returns true on success; false (via Fail) on any invalid input or
// unsupported configuration.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // outputInfo/outputShape are taken by value: they are rewritten below
    // when reshapes or permutations are inserted.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Rank-1/2 inputs are padded up to rank 3 (see RequiresReshape branch);
    // tensorDimensionsAdded records how many leading dims were added so the
    // same expansion can be applied to the output shape and undone afterwards.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // Verify the backend supports the helper reshape before adding it.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    // permutationPair.first swizzles the inputs before the concat;
    // permutationPair.second de-swizzles the result afterwards.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
            CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
                armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        armnn::PermuteDescriptor permuteDesc;
        permuteDesc.m_DimMappings = permutationPair.second;

        // Verify the backend supports the de-swizzle permute before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPermuteSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   outputInfo,
                                   permuteDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // Verify the backend supports the restoring reshape before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1964
// Converts an NNAPI CONV_2D operation into an ArmNN Convolution2d layer.
// Handles both HAL signatures: 10 inputs (explicit padding: pad L/R/T/B,
// strides, activation) and 7 inputs (implicit padding via
// android::nn::PaddingScheme, strides, activation).
// Weights (input 1) and bias (input 2) must be constant tensors.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding signature: all pad values are given directly.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding signature: padding is derived from the scheme,
        // kernel size and strides.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NOTE(review): kernel W/H taken from weight dims [2]/[1] — assumes
        // the NNAPI [ O, H, W, I ] weight layout; confirm against the HAL docs.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer-wise if activation is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2084
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002085template<typename HalPolicy,
2086 typename HalOperation = typename HalPolicy::Operation,
2087 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002088bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2089{
2090 using HalOperand = typename HalPolicy::Operand;
2091 using HalOperandType = typename HalPolicy::OperandType;
2092
2093 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2094 if (!input.IsValid() )
2095 {
2096 return Fail("%s: Operation has invalid inputs", __func__);
2097 }
2098
2099 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2100 unsigned int rank = inputInfo.GetNumDimensions();
2101 if (rank != 4)
2102 {
2103 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2104 }
2105
2106 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2107 if (!output)
2108 {
2109 return Fail("%s: Could not read output 0", __func__);
2110 }
2111
2112 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2113 if (IsDynamicTensor(outputInfo))
2114 {
2115 return Fail("%s: Dynamic output tensors are not supported", __func__);
2116 }
2117
2118 armnn::DepthToSpaceDescriptor descriptor;
2119
2120 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2121 if (descriptor.m_BlockSize <= 1)
2122 {
2123 return Fail("%s: Block size must be at least 1 in all dimensions");
2124 }
2125
2126 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2127 if (Is12Operand(*output))
2128 {
2129 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2130 }
2131
2132 bool isSupported = false;
2133 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2134 IsDepthToSpaceSupported,
2135 data.m_Backends,
2136 isSupported,
2137 inputInfo,
2138 outputInfo,
2139 descriptor);
2140 if (!isSupported)
2141 {
2142 return false;
2143 }
2144
2145 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2146 assert(layer != nullptr);
2147 input.Connect(layer->GetInputSlot(0));
2148
2149 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2150}
2151
// Converts an NNAPI DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer.
// Handles both HAL signatures: 11 inputs (explicit padding) and 8 inputs
// (implicit padding via android::nn::PaddingScheme). The depth multiplier
// operand (index 9 or 6) is not read here; M is instead derived from the
// weight and input shapes below.
// Weights (input 1) and bias (input 2) must be constant tensors.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I comes from the input's channel dim; M = (I * M) / I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding signature: all pad values are given directly.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding signature: padding is derived from the scheme,
        // kernel size and strides.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now in [ M, I, H, W ] order (see swizzle above),
        // so W is dim 3 and H is dim 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer-wise if activation is NONE).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2296
Mike Kelly3c673942019-07-25 09:26:06 +01002297template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002298 typename Operation = typename HalPolicy::Operation,
2299 typename Model = typename HalPolicy::Model>
2300bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002301{
Mike Kelly46272802019-08-14 17:00:48 +01002302 using Operand = typename HalPolicy::Operand;
2303
2304 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2305 if (!input.IsValid())
2306 {
2307 return Fail("%s: Operation has invalid input", __func__);
2308 }
2309
Sadik Armagan7a13acc2019-11-21 15:54:36 +00002310 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2311 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2312 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2313 {
2314 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2315 }
2316
Mike Kelly46272802019-08-14 17:00:48 +01002317 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2318 if (!outputOperand)
2319 {
2320 return Fail("%s: Operation has invalid outputs", __func__);
2321 }
2322
2323 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2324 if (IsDynamicTensor(outputInfo))
2325 {
2326 return Fail("%s: Dynamic output tensors are not supported", __func__);
2327 }
2328
2329 bool isSupported = false;
2330 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2331 IsDequantizeSupported,
2332 data.m_Backends,
2333 isSupported,
Sadik Armagan7a13acc2019-11-21 15:54:36 +00002334 inputInfo,
2335 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002336 if (!isSupported)
2337 {
2338 return false;
2339 }
2340
2341 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2342 assert(layer != nullptr);
2343 input.Connect(layer->GetInputSlot(0));
2344
2345 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2346}
2347
2348template<typename HalPolicy,
2349 typename Operation = typename HalPolicy::Operation,
2350 typename Model = typename HalPolicy::Model>
2351bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2352{
2353 using Operand = typename HalPolicy::Operand;
2354
2355 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2356 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2357
2358 if (!input0.IsValid() || !input1.IsValid())
2359 {
2360 return Fail("%s: Operation has invalid inputs", __func__);
2361 }
2362
2363 // The FuseActivation parameter is always the input index 2
2364 // and it should be optional
2365 ActivationFn activationFunction;
2366 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2367 {
2368 return Fail("%s: Operation has invalid inputs", __func__);
2369 }
2370
2371 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2372 if (!output)
2373 {
2374 return Fail("%s: Could not read output 0", __func__);
2375 }
2376
2377 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2378 if (IsDynamicTensor(outputInfo))
2379 {
2380 return Fail("%s: Dynamic output tensors are not supported", __func__);
2381 }
2382
2383 bool isSupported = false;
2384 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2385 IsDivisionSupported,
2386 data.m_Backends,
2387 isSupported,
2388 input0.GetTensorInfo(),
2389 input1.GetTensorInfo(),
2390 outputInfo);
2391 if (!isSupported)
2392 {
2393 return false;
2394 }
2395
2396 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2397 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2398
2399 if (endLayer)
2400 {
Derek Lamberti2c90f752019-12-19 15:45:35 +00002401 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002402 if (!isReshapeSupported)
2403 {
2404 return false;
2405 }
2406
Mike Kelly46272802019-08-14 17:00:48 +01002407 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2408 }
2409 return Fail("%s: ProcessActivation failed", __func__);
2410}
2411
2412template<typename HalPolicy,
2413 typename Operation = typename HalPolicy::Operation,
2414 typename Model = typename HalPolicy::Model>
2415bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2416{
2417 using Operand = typename HalPolicy::Operand;
2418
2419 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2420 if (!input.IsValid())
2421 {
2422 return Fail("%s: Operation has invalid inputs", __func__);
2423 }
2424
2425 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2426 if (!outputOperand)
2427 {
2428 return Fail("%s: Operation has invalid outputs", __func__);
2429 }
2430
2431 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2432 if (IsDynamicTensor(outputInfo))
2433 {
2434 return Fail("%s: Dynamic output tensors are not supported", __func__);
2435 }
2436
2437 bool isSupported = false;
2438 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2439 IsFloorSupported,
2440 data.m_Backends,
2441 isSupported,
2442 input.GetTensorInfo(),
2443 outputInfo);
2444 if (!isSupported)
2445 {
2446 return false;
2447 }
2448
2449 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2450 assert(layer != nullptr);
2451 input.Connect(layer->GetInputSlot(0));
2452
2453 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2454}
2455
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002456inline bool IsQSymm8(const V1_0::Operand&)
2457{
2458 return false;
2459}
2460
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true if the HAL 1.2 operand is a symmetrically quantized 8-bit tensor
// (the only dequantize source type supported by DequantizeIfRequired below).
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2469
2470template<typename HalPolicy,
2471 typename Operation = typename HalPolicy::Operation,
2472 typename Model = typename HalPolicy::Model>
Sadik Armaganac23b032019-11-18 17:11:21 +00002473std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, int>
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002474DequantizeIfRequired(size_t operand_index, const Operation& operation, const Model& model, const ConversionData& data)
2475{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002476 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002477
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002478 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armaganac23b032019-11-18 17:11:21 +00002479 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002480 {
Sadik Armaganac23b032019-11-18 17:11:21 +00002481 // Invalid Operand will return with error code '-1'
2482 return { nullptr, 0, armnn::TensorInfo(), -1 };
2483 }
2484
2485 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2486 {
2487 // Weights are already constant
2488 return { nullptr, 0, armnn::TensorInfo(), 0 };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002489 }
2490
2491 const size_t weightsInputIndex = operation.inputs[operand_index];
2492
2493 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2494 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2495 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2496 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002497 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002498 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002499 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2500 {
2501 continue;
2502 }
2503
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002504 size_t outOpIndex = weightsInputIndex + 1;
2505 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002506 {
2507 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002508 }
2509
2510 if (outOpIndex != weightsInputIndex)
2511 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002512 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002513 }
2514
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002515 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002516 BOOST_ASSERT(operand);
2517
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002518 if (!IsQSymm8(*operand))
2519 {
2520 // Only supporting dequantize from QSYMM8 to FLOAT
2521 break;
2522 }
2523
2524 // Allocate a new buffer for the dequantized data and manually dequantize
2525 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2526 if (!startValue)
2527 {
2528 // Failed to get the operand address
2529 break;
2530 }
2531
2532 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2533 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002534 const float quantizationScale = operand->scale;
2535
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002536 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2537 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2538 {
2539 float* dstPtr = dequantizedBuffer.get();
2540 BOOST_ASSERT(dstPtr);
2541 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2542 }
2543
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002544 // Construct tensor info for dequantized ConstTensor
2545 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2546 operand->dimensions.data(),
2547 armnn::DataType::Float32);
2548
Sadik Armaganac23b032019-11-18 17:11:21 +00002549 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float), std::move(tensorInfo), 0 };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002550 }
2551
Sadik Armaganac23b032019-11-18 17:11:21 +00002552 return { nullptr, 0, armnn::TensorInfo() , 0};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002553}
2554
2555template<typename HalPolicy,
2556 typename Operation = typename HalPolicy::Operation,
2557 typename Model = typename HalPolicy::Model>
2558ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
2559 const Model& model,
2560 const ConversionData& data,
2561 size_t operandIndex,
2562 bool optional = false)
2563{
2564 auto dequantized = DequantizeIfRequired<HalPolicy, Operation, Model>(operandIndex,operation, model, data);
Sadik Armaganac23b032019-11-18 17:11:21 +00002565 if (std::get<3>(dequantized) == -1)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002566 {
Sadik Armaganac23b032019-11-18 17:11:21 +00002567 // Return it as invalid, tensor with no values is not really an error
2568 return ConstTensorPin();
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002569 }
2570
Sadik Armaganac23b032019-11-18 17:11:21 +00002571 if (std::get<1>(dequantized) == 0)
2572 {
2573 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2574 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2575
2576 }
2577
2578 return ConstTensorPin(std::get<2>(dequantized), std::get<0>(dequantized).get(),
2579 std::get<1>(dequantized), g_DontPermute);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002580}
2581
2582
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = input tensor, 1 = weights (possibly produced by a DEQUANTIZE op),
//         2 = bias (1D), 3 = fused activation function.
// Returns false (logging via Fail) if the operation cannot be converted.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model     = typename HalPolicy::Model>
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be the output of a DEQUANTIZE op; resolve them to a constant tensor either way.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FULLY_CONNECTED implicitly flattens inputs of rank > 2 down to 2D;
    // FlattenFullyConnectedInput throws if the shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    // Weight matrix is treated as transposed; the shape check below is made under this setting.
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Verify the operand's declared output shape against the shape implied by input and weights.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert an explicit Reshape layer so the FC layer receives the flattened 2D input.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2700
2701template<typename HalPolicy,
2702 typename Operation = typename HalPolicy::Operation,
2703 typename Model = typename HalPolicy::Model>
2704bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2705{
Mike Kelly999e2092019-08-15 10:46:46 +01002706 if (operation.inputs.size() != 1)
2707 {
2708 return Fail("%s: Optional inputs are not supported", __func__);
2709 }
2710
Mike Kelly46272802019-08-14 17:00:48 +01002711 using Operand = typename HalPolicy::Operand;
2712
2713 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2714 if (!input.IsValid())
2715 {
2716 return Fail("%s: Operation has invalid inputs", __func__);
2717 }
2718
2719 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2720 if (!output)
2721 {
2722 return Fail("%s: Could not read output 0", __func__);
2723 }
2724
2725 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2726 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2727
2728 if (IsDynamicTensor(outputInfo))
2729 {
2730 return Fail("%s: Dynamic output tensors are not supported", __func__);
2731 }
2732 if (outputInfo.GetNumDimensions() != 4u)
2733 {
2734 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2735 }
2736
2737 armnn::L2NormalizationDescriptor desc;
2738 desc.m_DataLayout = armnn::DataLayout::NHWC;
2739
2740 bool isSupported = false;
2741 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2742 IsL2NormalizationSupported,
2743 data.m_Backends,
2744 isSupported,
2745 inputInfo,
2746 outputInfo,
2747 desc);
2748 if (!isSupported)
2749 {
2750 return false;
2751 }
2752
2753 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2754 assert(layer != nullptr);
2755 input.Connect(layer->GetInputSlot(0));
2756
2757 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2758}
2759
// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel, local-brightness method).
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model     = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const Operation& operation,
                                       const Model& model,
                                       ConversionData& data)
{
    // Only the 5-input form (input, radius, bias, alpha, beta) is handled;
    // any optional extra input is rejected.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using Operand = typename HalPolicy::Operand;
    using OperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read the scalar parameters: radius (INT32) into m_NormSize, then
    // bias/alpha/beta (FLOAT32) into m_K/m_Alpha/m_Beta.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2837
2838template<typename HalPolicy,
2839 typename Operation = typename HalPolicy::Operation,
2840 typename Model = typename HalPolicy::Model>
2841bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2842{
2843 using Operand = typename HalPolicy::Operand;
2844
2845 armnn::ActivationDescriptor desc;
2846 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2847
2848 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2849}
2850
// Converts an Android NN MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor to reduce, 1 = 1-D tensor of axes, 2 = keep-dims flag.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model     = typename HalPolicy::Model>
bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank wraps negative axis values into [0, rank).
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2927
2928template<typename HalPolicy,
2929 typename Operation = typename HalPolicy::Operation,
2930 typename Model = typename HalPolicy::Model>
2931bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2932{
2933 using Operand = typename HalPolicy::Operand;
2934
2935 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2936 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2937
2938 if (!input0.IsValid() || !input1.IsValid())
2939 {
2940 return Fail("%s: Operation has invalid inputs", __func__);
2941 }
2942
2943 // The FuseActivation parameter is always the input index 2
2944 // and it should be optional
2945 ActivationFn activationFunction;
2946 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2947 {
2948 return Fail("%s: Operation has invalid inputs", __func__);
2949 }
2950
2951 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2952
2953 if (outputOperand == nullptr)
2954 {
2955 return false;
2956 }
2957
2958 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2959 if (IsDynamicTensor(outputInfo))
2960 {
2961 return Fail("%s: Dynamic output tensors are not supported", __func__);
2962 }
2963
2964 bool isSupported = false;
2965 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2966 IsMultiplicationSupported,
2967 data.m_Backends,
2968 isSupported,
2969 input0.GetTensorInfo(),
2970 input1.GetTensorInfo(),
2971 outputInfo);
2972 if (!isSupported)
2973 {
2974 return false;
2975 }
2976
2977 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2978 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2979
2980 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2981 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2982
2983 if (endLayer != nullptr)
2984 {
Derek Lamberti2c90f752019-12-19 15:45:35 +00002985 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002986 if (!isReshapeSupported)
2987 {
2988 return false;
2989 }
2990
Mike Kelly46272802019-08-14 17:00:48 +01002991 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2992 }
2993 else
2994 {
2995 return Fail("%s: ProcessActivation failed", __func__);
2996 }
2997}
2998
2999template<typename HalPolicy,
3000 typename Operation = typename HalPolicy::Operation,
3001 typename Model = typename HalPolicy::Model>
3002bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
3003{
3004 using Operand = typename HalPolicy::Operand;
3005
Mike Kelly3c673942019-07-25 09:26:06 +01003006 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3007 if (!input.IsValid())
3008 {
3009 return Fail("%s: Operation has invalid inputs", __func__);
3010 }
3011
3012 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3013 unsigned int rank = inputInfo.GetNumDimensions();
3014
3015 armnn::PadDescriptor descriptor;
3016 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3017 {
3018 return Fail("%s: Could not convert paddings", __func__);
3019 }
3020
3021 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3022 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3023 // (QuantizationOffset - QuantizationOffset) * scale = 0.
3024 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
3025 {
3026 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3027 }
3028
Mike Kelly46272802019-08-14 17:00:48 +01003029 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003030 if (!output)
3031 {
3032 return Fail("%s: Could not read output", __func__);
3033 }
3034
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003035 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003036 if (IsDynamicTensor(outputInfo))
3037 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003038 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003039 }
3040
3041 bool isSupported = false;
3042 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3043 IsPadSupported,
3044 data.m_Backends,
3045 isSupported,
3046 inputInfo,
3047 outputInfo,
3048 descriptor);
3049 if (!isSupported)
3050 {
3051 return false;
3052 }
3053
3054 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3055 assert(layer != nullptr);
3056 input.Connect(layer->GetInputSlot(0));
3057 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3058
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003059 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003060}
3061
// Converts an Android NN RESHAPE operation into an ArmNN Reshape layer.
// Inputs: 0 = tensor to reshape, 1 = 1-D tensor holding the requested shape
// (may contain -1 wildcards, resolved by the NN runtime's reshapePrepare()).
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model     = typename HalPolicy::Model>
bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The requested shape must be supplied as a 1-D tensor.
    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    // The resolved shape must agree with the declared output operand shape.
    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               GetTensorInfoForOperand(*outputOperand),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3137
3138template<typename HalPolicy,
3139 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01003140 typename Model = typename HalPolicy::Model>
3141bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
3142{
Mike Kelly46272802019-08-14 17:00:48 +01003143 using Operand = typename HalPolicy::Operand;
3144
Mike Kelly0a879362019-07-29 16:56:31 +01003145 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3146 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3147
3148 if (!input0.IsValid() || !input1.IsValid())
3149 {
3150 return Fail("%s: Operation has invalid inputs", __func__);
3151 }
3152
3153 // The FuseActivation parameter is always the input index 2
3154 // and it should be optional
3155 ActivationFn activationFunction;
3156 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3157 {
3158 return Fail("%s: Operation has invalid inputs", __func__);
3159 }
3160
3161 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3162 if (!output)
3163 {
3164 return Fail("%s: Could not read output 0", __func__);
3165 }
3166
3167 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3168 if (IsDynamicTensor(outputInfo))
3169 {
3170 return Fail("%s: Dynamic output tensors are not supported", __func__);
3171 }
3172
3173 bool isSupported = false;
3174 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3175 IsSubtractionSupported,
3176 data.m_Backends,
3177 isSupported,
3178 input0.GetTensorInfo(),
3179 input1.GetTensorInfo(),
3180 outputInfo);
3181 if (!isSupported)
3182 {
3183 return false;
3184 }
3185
3186 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3187 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3188
3189 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3190 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3191
3192 if (endLayer)
3193 {
Derek Lamberti2c90f752019-12-19 15:45:35 +00003194 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003195 if (!isReshapeSupported)
3196 {
3197 return false;
3198 }
Mike Kelly0a879362019-07-29 16:56:31 +01003199 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3200 }
3201
3202 return Fail("%s: ProcessActivation failed", __func__);
3203}
3204
Finn Williams23b87b32019-07-30 11:44:05 +01003205template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01003206 typename Operation = typename HalPolicy::Operation,
3207 typename Model = typename HalPolicy::Model>
3208bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
3209{
3210 using Operand = typename HalPolicy::Operand;
3211
3212 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3213 if (!input.IsValid())
3214 {
3215 return Fail("%s: Operation has invalid inputs", __func__);
3216 }
3217
3218 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3219 unsigned int rank = inputInfo.GetNumDimensions();
3220 if (rank > 4)
3221 {
3222 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3223 }
3224
3225 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3226 if (!output)
3227 {
3228 return Fail("%s: Could not read output 0", __func__);
3229 }
3230
3231 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3232 {
3233 return Fail("%s: Dynamic output tensors are not supported", __func__);
3234 }
3235
3236 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3237 // if the operand index is out of bounds.
3238 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3239
3240 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3241
3242 std::vector<int32_t> axis;
3243 if (!axisOperand)
3244 {
3245 axis.assign(dimensionSequence,
3246 dimensionSequence + rank);
3247 }
3248 else
3249 {
3250 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3251 }
3252
3253 std::vector<uint32_t> outputDims;
3254 for (unsigned int i = 0; i < rank; i++)
3255 {
3256 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3257 auto currentDimension = inputInfo.GetShape()[i];
3258 if (skipSqueeze || currentDimension != 1)
3259 {
3260 outputDims.push_back(currentDimension);
3261 }
3262 }
3263
3264 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3265
3266 armnn::TensorInfo outputInfo = inputInfo;
3267 outputInfo.SetShape(outShape);
3268
3269 armnn::ReshapeDescriptor reshapeDesc;
3270 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3271
3272 bool isSupported = false;
3273 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3274 IsReshapeSupported,
3275 data.m_Backends,
3276 isSupported,
3277 inputInfo,
Kevin Maydbbcc392019-12-12 16:33:31 +00003278 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003279 reshapeDesc);
3280 if (!isSupported)
3281 {
3282 return false;
3283 }
3284
3285 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3286 assert(layer != nullptr);
3287 input.Connect(layer->GetInputSlot(0));
3288
3289 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3290}
3291
3292template<typename HalPolicy,
3293 typename Operation = typename HalPolicy::Operation,
3294 typename Model = typename HalPolicy::Model>
3295bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
3296{
3297 using Operand = typename HalPolicy::Operand;
3298
3299 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3300 if (!input.IsValid())
3301 {
3302 return Fail("%s: Operation has invalid inputs", __func__);
3303 }
3304
3305 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3306 unsigned int rank = inputInfo.GetNumDimensions();
3307 if (rank > 4)
3308 {
3309 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3310 }
3311
3312 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3313 if (!output)
3314 {
3315 return Fail("%s: Could not read output 0", __func__);
3316 }
3317
3318 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3319 if (IsDynamicTensor(outputInfo))
3320 {
3321 return Fail("%s: Dynamic output tensors are not supported", __func__);
3322 }
3323
3324 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3325 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3326 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
3327
3328 std::vector<int32_t> beginValues;
3329 std::vector<int32_t> endValues;
3330 std::vector<int32_t> stridesValues;
3331
3332 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
3333 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
3334 {
3335 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3336 {
3337 return false;
3338 }
3339
3340 if (operandValues.size() != rank)
3341 {
3342 return false;
3343 }
3344
3345 return true;
3346 };
3347
3348 if (!ValidateInputOperands(*beginOperand, beginValues)
3349 || !ValidateInputOperands(*endOperand, endValues)
3350 || !ValidateInputOperands(*stridesOperand, stridesValues))
3351 {
3352 return Fail("%s: Operation has invalid input operand", __func__);
3353 }
3354
3355 // Stride cannot have value '0'
3356 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3357 {
3358 return Fail("%s: Stride must be non-zero value.", __func__);
3359 }
3360
3361 armnn::StridedSliceDescriptor descriptor;
3362 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3363 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3364 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3365 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3366
3367 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3368 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3369 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3370 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3371 {
3372 return Fail("%s: Operation has invalid inputs", __func__);
3373 }
3374
3375 bool isSupported = false;
3376 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3377 IsStridedSliceSupported,
3378 data.m_Backends,
3379 isSupported,
3380 inputInfo,
3381 outputInfo,
3382 descriptor);
3383 if (!isSupported)
3384 {
3385 return false;
3386 }
3387
3388 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3389 assert(layer != nullptr);
3390 input.Connect(layer->GetInputSlot(0));
3391
3392 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3393}
3394
3395template<typename HalPolicy,
3396 typename Operation = typename HalPolicy::Operation,
3397 typename Model = typename HalPolicy::Model>
3398bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
3399{
3400 using Operand = typename HalPolicy::Operand;
3401
3402 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3403 if (!input.IsValid())
3404 {
3405 return Fail("%s: Operation has invalid inputs", __func__);
3406 }
3407
3408 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3409 unsigned int rank = inputInfo.GetNumDimensions();
3410 if (rank > 4)
3411 {
3412 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3413 }
3414
3415 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3416 // if the operand index is out of bounds.
3417 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3418
3419 std::vector<int32_t> perm(rank);
3420 if (!permOperand)
3421 {
3422 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3423 for (unsigned int i = rank; i > 0; i--)
3424 {
3425 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3426 }
3427 }
3428 else
3429 {
3430 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3431 }
3432
3433 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3434
3435 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3436 if (!permutationVector.IsEqual(NHWCToArmNN)
3437 && !permutationVector.IsEqual(ArmNNToNHWC)
3438 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3439 {
3440 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3441 }
3442
3443 armnn::PermuteDescriptor permuteDesc;
3444 permuteDesc.m_DimMappings = permutationVector;
3445
3446 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3447 if (!output)
3448 {
3449 return Fail("%s: Could not read output 0", __func__);
3450 }
3451
3452 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3453
3454 bool isSupported = false;
3455 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3456 IsPermuteSupported,
3457 data.m_Backends,
3458 isSupported,
3459 inputInfo,
3460 outputInfo,
3461 permuteDesc);
3462 if (!isSupported)
3463 {
3464 return false;
3465 }
3466
3467 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3468 assert(layer != nullptr);
3469 input.Connect(layer->GetInputSlot(0));
3470
3471 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3472}
3473
3474template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003475 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003476 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003477 typename HalModel = typename HalPolicy::Model>
3478bool ConvertBatchToSpaceNd(const HalOperation& operation,
3479 const HalModel& model,
3480 ConversionData& data)
3481{
Finn Williams23b87b32019-07-30 11:44:05 +01003482
3483 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3484 if (!input.IsValid())
3485 {
3486 return Fail("%s: Operation has invalid inputs", __func__);
3487 }
3488
3489 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3490 if (!output)
3491 {
3492 return Fail("%s: Could not read output 0", __func__);
3493 }
3494
3495 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3496 if (IsDynamicTensor(outputInfo))
3497 {
3498 return Fail("%s: Dynamic output tensors are not supported", __func__);
3499 }
3500
3501 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3502 if (!blockOperand)
3503 {
3504 return Fail("%s: Could not read input 1", __func__);
3505 }
3506
3507 // Convert the block operand to int32
3508 std::vector<int32_t> block;
3509 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3510 {
3511 return Fail("%s: Input 1 has invalid values", __func__);
3512 }
3513
3514 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3515
3516 unsigned int rank = inputInfo.GetNumDimensions();
3517 if (rank != 4)
3518 {
3519 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3520 }
3521
3522 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3523 {
3524 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3525 " greater than or equal to 1", __func__);
3526 }
3527
3528 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3529 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3530 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3531
3532 if (Is12Operand(*output))
3533 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003534 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003535 }
3536 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3537 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3538
3539 bool isSupported = false;
3540 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3541 IsBatchToSpaceNdSupported,
3542 data.m_Backends,
3543 isSupported,
3544 inputInfo,
3545 outputInfo,
3546 batchToSpaceNdDesc);
3547 if (!isSupported)
3548 {
3549 return false;
3550 }
3551
3552 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3553 assert(layer != nullptr);
3554 input.Connect(layer->GetInputSlot(0));
3555
3556 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3557}
Mike Kelly0a879362019-07-29 16:56:31 +01003558
Finn Williamsd74c5052019-07-30 17:06:00 +01003559template<typename HalPolicy,
3560 typename HalOperation = typename HalPolicy::Operation,
3561 typename HalOperand = typename HalPolicy::Operand,
3562 typename HalModel = typename HalPolicy::Model>
3563bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3564{
3565 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3566 if (!input.IsValid())
3567 {
3568 return Fail("%s: Operation has invalid inputs", __func__);
3569 }
3570
3571 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3572 unsigned int rank = inputInfo.GetNumDimensions();
3573 unsigned int spatialDim = rank - 2;
3574
3575 if (rank != 4)
3576 {
3577 Fail("%s: Only inputs with rank 4 are supported", __func__);
3578 }
3579
3580 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3581 if (!output)
3582 {
3583 return Fail("%s: Could not read output 0", __func__);
3584 }
3585
3586 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3587 if (IsDynamicTensor(outputInfo))
3588 {
3589 return Fail("%s: Dynamic output tensors are not supported", __func__);
3590 }
3591
3592 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3593 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3594
3595 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3596 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3597 {
3598 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3599 }
3600
3601 std::vector<int32_t> blockShape;
3602 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3603 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3604 {
3605 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3606 }
3607
3608 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3609 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3610 {
3611 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3612 }
3613
3614 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3615 std::vector<int32_t> paddings;
3616 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3617 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3618 {
3619 int paddingBeforeInput = paddings[i];
3620 int paddingAfterInput = paddings[i + 1];
3621 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3622 {
3623 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3624 }
3625
3626 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3627 }
3628
3629 armnn::SpaceToBatchNdDescriptor descriptor;
3630 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3631 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3632 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3633
3634 if (Is12Operand(*output))
3635 {
3636 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3637 }
3638
3639 bool isSupported = false;
3640 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3641 IsSpaceToBatchNdSupported,
3642 data.m_Backends,
3643 isSupported,
3644 inputInfo,
3645 outputInfo,
3646 descriptor);
3647 if (!isSupported)
3648 {
3649 return false;
3650 }
3651
3652 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3653 assert(layer != nullptr);
3654 input.Connect(layer->GetInputSlot(0));
3655
3656 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3657}
3658
Kevin May407718f2019-09-09 14:46:41 +01003659template<typename HalPolicy,
3660 typename HalOperation = typename HalPolicy::Operation,
3661 typename HalModel = typename HalPolicy::Model>
3662bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3663{
3664 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3665
3666 if (!input.IsValid())
3667 {
3668 return Fail("%s: Operation has invalid input", __func__);
3669 }
3670
3671 using HalOperand = typename HalPolicy::Operand;
3672 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3673 if (!output)
3674 {
3675 return Fail("%s: Could not read output 0", __func__);
3676 }
3677
3678 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3679 if (IsDynamicTensor(outputInfo))
3680 {
3681 return Fail("%s: Dynamic output tensors are not supported", __func__);
3682 }
3683
3684 bool isSupported = false;
3685 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3686 IsAbsSupported,
3687 data.m_Backends,
3688 isSupported,
3689 input.GetTensorInfo(),
3690 outputInfo);
3691
3692 if (!isSupported)
3693 {
3694 return false;
3695 }
3696
3697 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3698 assert(layer != nullptr);
3699 input.Connect(layer->GetInputSlot(0));
3700
3701 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3702}
3703
3704
saoste01b8471482018-10-10 09:44:51 +01003705} // namespace armnn_driver