blob: a1405fbbbe7d01d9c2ede59da32bc34b532d5271 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
/// Shared mutable state threaded through the model-to-INetwork conversion.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    /// Backends against which layer support is queried during conversion.
    const std::vector<armnn::BackendId> m_Backends;
    /// The ArmNN network being built up (starts out null; created by the converter).
    armnn::INetworkPtr m_Network;
    /// Output slot that produces each operand's value — presumably indexed by
    /// operand index; confirm against the call sites that populate it.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    /// Run-time memory pools backing constant operand data referenced by the model.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
/// Handle pairing an armnn::IOutputSlot (the producer of an operand's value)
/// with that operand's TensorInfo, plus a validity flag for error signalling.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// True if this handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects this handle to the given downstream input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
/// Wraps an armnn::ConstTensor whose backing memory is guaranteed to outlive
/// the conversion: either a model memory pool, or an internally owned,
/// swizzled copy of the data.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (may own its tensor data), but movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Logs the reason for failing to convert a model and returns false, so call
// sites can write `return Fail("...", ...);` to log and bail out in one step.
template<class... FmtArgs>
static bool Fail(const char* format, FmtArgs&&... fmtArgs)
{
    ALOGD(format, std::forward<FmtArgs>(fmtArgs)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Tries each backend in 'backends' in turn until one reports support, writing the result into 'supported'.
// Unregistered backends and per-backend rejection reasons are logged at debug level.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000169template<typename HalOperand>
170armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

/// Returns true if the given HAL 1.2 operand type is a tensor type this
/// driver can convert (BOOL included, matching the V1_2 support set).
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
196
197inline bool IsBool(V1_0::Operand)
198{
199 return false;
200}
201
Sadik Armagan61113162019-07-25 09:09:40 +0100202inline bool Is12Operand(V1_0::Operand)
203{
204 return false;
205}
206
#ifdef ARMNN_ANDROID_NN_V1_2

/// A V1_2 operand is a bool exactly when its type is BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks whether an operand is a HAL 1.2 operand; always true for the
/// V1_2 overload.
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
221
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100222template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000223armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
224 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100225 armnn::TensorInfo reshapeInfo)
226{
227 armnn::ReshapeDescriptor reshapeDescriptor;
228 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
229
230 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
231 BOOST_ASSERT(reshapeLayer != nullptr);
232
233 // Attach the input layer to the reshape layer
234 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
235 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
236
237 return *reshapeLayer;
238}
239
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000240bool BroadcastTensor(LayerInputHandle& input0,
241 LayerInputHandle& input1,
242 armnn::IConnectableLayer* startLayer,
243 ConversionData& data)
arovir01b0717b52018-09-05 17:03:25 +0100244{
245 BOOST_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100246
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100247 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
248 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
249
250 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
251 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
252
253 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100254 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100255 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
256 input0.Connect(startLayer->GetInputSlot(0));
257 input1.Connect(startLayer->GetInputSlot(1));
258
Sadik Armagan64b19b52019-08-19 09:49:58 +0100259 return true;
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100260 }
261
262 // Since the number of dimensions do not match then we need to add degenerate dimensions
263 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
264
265 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
266 unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
267 boost::numeric_cast<int>(inputDimensions1));
268
269 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
270 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
271 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
272
273 const armnn::TensorShape& smallShape = smallInfo.GetShape();
274 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
275 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
276 {
277 reshapedDimensions[i] = smallShape[i - sizeDifference];
278 }
279
280 armnn::TensorInfo reshapedInfo = smallInfo;
281 reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
282 reshapedDimensions.data() });
Sadik Armagan64b19b52019-08-19 09:49:58 +0100283
284 // RehsapeDescriptor that is ignored in the IsReshapeSupported function
285 armnn::ReshapeDescriptor reshapeDescriptor;
286
287 bool isSupported = false;
288 FORWARD_LAYER_SUPPORT_FUNC(__func__,
289 IsReshapeSupported,
290 data.m_Backends,
291 isSupported,
292 reshapedInfo,
293 reshapeDescriptor);
294 if (!isSupported)
295 {
296 return false;
297 }
298
299 BOOST_ASSERT(data.m_Network != nullptr);
300 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100301
302 if (input0IsSmaller)
303 {
304 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
305 //
306 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100307 // | |
308 // Reshape |
309 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100310 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100311
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100312 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
313 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100314 }
315 else
316 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100317 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
318 //
319 // Input0 Input1
320 // | |
321 // | Reshape
322 // \ /
323 // StartLayer
324
arovir01b0717b52018-09-05 17:03:25 +0100325 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100326 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100327 }
Sadik Armagan64b19b52019-08-19 09:49:58 +0100328
329 return true;
arovir01b0717b52018-09-05 17:03:25 +0100330}
331
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000332void CalcPadding(uint32_t input,
333 uint32_t kernel,
334 uint32_t stride,
335 uint32_t& outPadHead,
336 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100337 android::nn::PaddingScheme scheme)
338{
339 int32_t padHead;
340 int32_t padTail;
341 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
342 outPadHead = boost::numeric_cast<uint32_t>(padHead);
343 outPadTail = boost::numeric_cast<uint32_t>(padTail);
344}
345
#ifdef ARMNN_ANDROID_NN_V1_2

/// HAL 1.2 overload taking a dilation factor, forwarded to
/// calculateExplicitPadding alongside the kernel size.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head = 0;
    int32_t tail = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

/// Explicit padding for transpose convolution, derived from the output size.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
365
Matthew Bentham912b3622019-05-03 15:49:14 +0100366Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100367{
368 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100369 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100370 shape.dimensions = operand.dimensions;
371 shape.scale = operand.scale;
372 shape.offset = operand.zeroPoint;
373 return shape;
374}
375
#ifdef ARMNN_ANDROID_NN_V1_2

/// V1_2 overload of GetOperandShape; same field mapping as the V1_0 version.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint; // NN "offset" is the HAL zeroPoint
    return result;
}

#endif
389
arovir01b0717b52018-09-05 17:03:25 +0100390// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
391// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
Aron Virginas-Tara0baa172019-08-01 11:24:08 +0100392// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
393// user (us, in this case) to ensure they match.
arovir01b0717b52018-09-05 17:03:25 +0100394void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000395 const armnn::TensorInfo& weightInfo,
396 const armnn::TensorInfo& inputInfo)
arovir01b0717b52018-09-05 17:03:25 +0100397{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000398 if (weightInfo.HasPerAxisQuantization())
arovir01b0717b52018-09-05 17:03:25 +0100399 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000400 // NOTE: Bias scale is always set to 0 for per-axis quantization and
401 // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
402 auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
arovir01b0717b52018-09-05 17:03:25 +0100403 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000404 return biasScale * inputInfo.GetQuantizationScale();
405 };
406
407 std::vector<float> biasScales(weightInfo.GetQuantizationScales());
408 std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
409
410 biasInfo.SetQuantizationScales(biasScales);
411 biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());
412
413 ALOGV("Bias quantization params have been updated for per-axis quantization");
414 }
415 else
416 {
417 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
418 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
419 {
420 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
421 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
422 {
423 ALOGW("Bias quantization scale has been modified to match input * weights");
424 biasInfo.SetQuantizationScale(expectedBiasScale);
425 }
arovir01b0717b52018-09-05 17:03:25 +0100426 }
427 }
428}
429
// 4D Tensor Permutations
// NOTE(review): the direction of each mapping (source->destination vs
// destination->source index) follows armnn::PermutationVector semantics —
// confirm against armnnUtils::Permuted before adding new vectors.
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
440
441template<typename OSlot>
442armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
443 const armnn::PermutationVector& mappings)
444{
445 // Add swizzle layer
446 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
447
448 BOOST_ASSERT(layer != nullptr);
449
450 // Connect input to swizzle layer
451 input.Connect(layer->GetInputSlot(0));
452
453 // Setup swizzled output
454 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
455 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
456
457 return *layer;
458}
459
460void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
461{
462 // Add swizzle layer
463 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
464 // Connect swizzled input to layer
465 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
466}
467
468armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
469{
470 // Add deswizzle layer
471 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
472 return deswizzleLayer;
473}
474
475// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
476armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
477 LayerInputHandle& input,
478 armnn::IConnectableLayer& firstLayer,
479 armnn::IConnectableLayer& lastLayer)
480{
481 SwizzleIn(network, input, firstLayer, 0);
482 return DeswizzleOut(network, lastLayer, 0);
483}
484
485// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
486armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
487 armnn::IConnectableLayer& layer)
488{
489 return SwizzleInDeswizzleOut(network, input, layer, layer);
490}
491
492bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
493 const armnn::TensorShape & outputShape,
494 uint32_t concatDim)
495{
496 // Validate the output shape is correct given the input shapes (which have just been validated)
497 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
498 if (outputShape.GetNumDimensions() != numDimensions)
499 {
500 return Fail("%s: Output shape has wrong number of dimensions", __func__);
501 }
502
503 unsigned int outputSizeAlongConcatenatedDimension = 0;
504 for (unsigned int i = 0; i < inputShapes.size(); i++)
505 {
506 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
507 }
508
509 for (unsigned int i = 0; i < numDimensions; ++i)
510 {
511 if (i == concatDim)
512 {
513 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
514 {
515 return Fail(
516 "%s: Invalid output shape for dimension %d (%d != %d)",
517 __func__,
518 i,
519 outputShape[i],
520 outputSizeAlongConcatenatedDimension);
521 }
522 }
523 else
524 {
525 if (outputShape[i] != inputShapes[0][i])
526 {
527 return Fail("%s: Invalid output shape", __func__);
528 }
529 }
530 }
531
532 return true;
533}
534
535bool RequiresReshape(armnn::TensorShape & inputShape)
536{
537 return inputShape.GetNumDimensions() < 3;
538}
539
arovir01b0717b52018-09-05 17:03:25 +0100540void SwizzleInputs(armnn::INetwork& network,
541 std::vector<LayerInputHandle>& inputs,
542 std::vector<armnn::TensorShape>& inputShapes,
543 const armnn::PermutationVector& mapping)
544{
545 if (!mapping.IsEqual(IdentityPermutation4D))
546 {
547 size_t nInputs = inputs.size();
548 for (size_t i=0; i<nInputs; ++i)
549 {
550 // add swizzle layer
551 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
552 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
553 auto& outputInfo = outputSlot.GetTensorInfo();
554 // replace inputs with the swizzled ones
555 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
556 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
557 }
558 }
559}
560
narpra01f176d5a2018-11-18 20:17:48 +0000561bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
562 int32_t & concatDimension,
563 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100564{
narpra01f176d5a2018-11-18 20:17:48 +0000565 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100566 BOOST_ASSERT(numberOfDimensions >= 3);
567
568 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000569 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
570 // or along dimension 0 or 2 for a 3-D tensor.
571 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100572 {
narpra01f176d5a2018-11-18 20:17:48 +0000573 concatDimension = 1;
574 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
575 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100576 }
narpra01f176d5a2018-11-18 20:17:48 +0000577 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100578 {
narpra01f176d5a2018-11-18 20:17:48 +0000579 concatDimension = 0;
580 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
581 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100582 }
narpra01f176d5a2018-11-18 20:17:48 +0000583 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100584}
585
586} // anonymous namespace
587
588namespace armnn_driver
589{
590
591//// Creates an ArmNN activation layer and connects it to the given layer, if the
592//// passed in AndroidNN activation function requires so.
593//// @return The end layer of the sequence of layers built for the given AndroidNN
594//// activation function or nullptr if an error occurred (e.g. unsupported activation).
595//// Note that the end layer matches the input layer if no activation is required
596//// (the sequence of layers has length 1).
597armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
598 ActivationFn activation,
599 armnn::IConnectableLayer* prevLayer,
600 ConversionData& data);
601
602} // namespace armnn_driver
603
604///
605/// Utility templates
606///
607
608namespace armnn_driver
609{
610
611using namespace android::nn;
612
/// Returns a pointer to input operand 'inputIndex' of 'operation', or nullptr
/// if the index is out of bounds (logging a failure unless
/// failOnIndexOutOfBounds is false).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // "%u"/"%zu" match the argument types (uint32_t / size_t);
            // the previous "%i" specifiers were mismatched for both.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
634
/// Returns a pointer to output operand 'outputIndex' of 'operation', or
/// nullptr (with a logged failure) if the index is out of bounds.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // "%u"/"%zu" match the argument types (uint32_t / size_t);
        // the previous "%i" specifiers were mismatched for both.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
654
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100655template<typename HalPolicy,
Pablo Tellofb45e2f2019-10-18 16:51:57 +0100656 typename HalOperand = typename HalPolicy::Operand,
657 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100658const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100659 const HalModel& model,
660 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000661 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100662{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100663 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100664
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100665 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100666 switch (operand.lifetime)
667 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100668 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100669 {
670 // Constant found in model.operandValues
671 valueStart = &model.operandValues[operand.location.offset];
672 break;
673 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100674 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100675 {
676 // Constant specified via a Memory object
677 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
678 break;
679 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100680 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000681 {
682 // An optional input tensor with no values is not an error so should not register as a fail
683 if (optional)
684 {
685 valueStart = nullptr;
686 break;
687 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100688 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000689 }
arovir01b0717b52018-09-05 17:03:25 +0100690 default:
691 {
692 // Unsupported/invalid (e.g. can't get value of an input to the model)
693 Fail("%s: unsupported/invalid operand lifetime: %s",
694 __func__, toString(operand.lifetime).c_str());
695 valueStart = nullptr;
696 }
697 }
698
699 return valueStart;
700}
701
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100702template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100703 typename HalOperation = typename HalPolicy::Operation,
704 typename HalModel = typename HalPolicy::Model,
705 typename HalOperandType = typename HalPolicy::OperandType>
706bool GetOperandType(const HalOperation& operation,
707 uint32_t inputIndex,
708 const HalModel& model,
709 HalOperandType& type)
710{
711 using HalOperand = typename HalPolicy::Operand;
712
713 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
714 if (!operand)
715 {
716 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
717 }
718
719 type = operand->type;
720 return true;
721}
722
/// True if the operand's value is fixed at conversion time.
/// NOTE: NO_VALUE (an omitted optional input) deliberately counts as constant
/// here, matching how absent optionals are handled downstream.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
735
736template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100737 typename HalOperand = typename HalPolicy::Operand,
738 typename HalModel = typename HalPolicy::Model>
739ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
740 const HalModel& model,
741 const ConversionData& data,
742 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
743 const armnn::TensorShape* overrideTensorShape = nullptr,
744 bool optional = false)
745{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100746 if (!IsOperandTypeSupportedForTensors(operand.type))
747 {
748 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
749 return ConstTensorPin();
750 }
751
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000752 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100753 {
754 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
755 return ConstTensorPin();
756 }
757
758 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
759 if (!valueStart)
760 {
761 if (optional)
762 {
763 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
764 return ConstTensorPin(true);
765 }
766 // mandatory tensor with no values
767 Fail("%s: failed to get operand address", __func__);
768 return ConstTensorPin();
769 }
770
771 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Teresa Charlin02dce092019-11-11 17:06:23 +0000772 // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
773 if (tensorInfo.HasPerAxisQuantization())
774 {
775 tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
776 }
777
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100778 if (overrideTensorShape != nullptr)
779 {
780 tensorInfo.SetShape(*overrideTensorShape);
781 }
782 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
783}
784
785template<typename HalPolicy,
786 typename HalOperation = typename HalPolicy::Operation,
787 typename HalModel = typename HalPolicy::Model>
788ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
789 uint32_t inputIndex,
790 const HalModel& model,
791 const ConversionData& data,
792 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
793 const armnn::TensorShape* overrideTensorShape = nullptr,
794 bool optional = false)
795{
796 using HalOperand = typename HalPolicy::Operand;
797
798 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
799 if (!operand)
800 {
801 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
802 return ConstTensorPin();
803 }
804 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
805 model,
806 data,
807 dimensionMappings,
808 overrideTensorShape,
809 optional);
810}
811
812template<typename HalPolicy,
813 typename OutputType,
814 typename HalOperandType = typename HalPolicy::OperandType,
815 typename HalOperation = typename HalPolicy::Operation,
816 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100817bool GetInputScalar(const HalOperation& operation,
818 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100819 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100820 OutputType& outValue,
821 const HalModel& model,
822 const ConversionData& data)
823{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100824 using HalOperand = typename HalPolicy::Operand;
825
826 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100827 if (!operand)
828 {
829 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
830 }
831
832 if (operand->type != type)
833 {
834 return Fail("%s: unexpected operand type: %s (should be %s)",
835 __func__, toString(operand->type).c_str(), toString(type).c_str());
836 }
837
838 if (operand->location.length != sizeof(OutputType))
839 {
840 return Fail("%s: incorrect operand location length: %i (should be %i)",
841 __func__, operand->location.length, sizeof(OutputType));
842 }
843
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100844 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100845 if (!valueAddress)
846 {
847 return Fail("%s: failed to get address for operand", __func__);
848 }
849
850 outValue = *(static_cast<const OutputType*>(valueAddress));
851 return true;
852}
853
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100854template<typename HalPolicy,
855 typename HalOperation = typename HalPolicy::Operation,
856 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100857bool GetInputInt32(const HalOperation& operation,
858 uint32_t inputIndex,
859 int32_t& outValue,
860 const HalModel& model,
861 const ConversionData& data)
862{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100863 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100864}
865
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100866template<typename HalPolicy,
867 typename HalOperation = typename HalPolicy::Operation,
868 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100869bool GetInputFloat32(const HalOperation& operation,
870 uint32_t inputIndex,
871 float& outValue,
872 const HalModel& model,
873 const ConversionData& data)
874{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100875 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100876}
877
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100878template<typename HalPolicy,
879 typename HalOperation = typename HalPolicy::Operation,
880 typename HalOperandType = typename HalPolicy::OperandType,
881 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100882bool GetInputActivationFunctionImpl(const HalOperation& operation,
883 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100884 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100885 ActivationFn& outActivationFunction,
886 const HalModel& model,
887 const ConversionData& data)
888{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100889 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100890 {
891 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
892 __func__,
893 toString(type).c_str(),
894 toString(OperandType::INT32).c_str(),
895 toString(OperandType::TENSOR_INT32).c_str());
896 }
897
898 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100899 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100900 {
901 return Fail("%s: failed to get activation input value", __func__);
902 }
903 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
904 return true;
905}
906
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100907template<typename HalPolicy,
908 typename HalOperation = typename HalPolicy::Operation,
909 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100910bool GetInputActivationFunction(const HalOperation& operation,
911 uint32_t inputIndex,
912 ActivationFn& outActivationFunction,
913 const HalModel& model,
914 const ConversionData& data)
915{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100916 return GetInputActivationFunctionImpl<HalPolicy>(operation,
917 inputIndex,
918 HalPolicy::OperandType::INT32,
919 outActivationFunction,
920 model,
921 data);
arovir01b0717b52018-09-05 17:03:25 +0100922}
923
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100924template<typename HalPolicy,
925 typename HalOperation = typename HalPolicy::Operation,
926 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100927bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
928 uint32_t inputIndex,
929 ActivationFn& outActivationFunction,
930 const HalModel& model,
931 const ConversionData& data)
932{
933 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100934 return GetInputActivationFunctionImpl<HalPolicy>(operation,
935 inputIndex,
936 HalPolicy::OperandType::INT32,
937 outActivationFunction,
938 model,
939 data);
arovir01b0717b52018-09-05 17:03:25 +0100940}
941
942
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100943template<typename HalPolicy,
944 typename HalOperation = typename HalPolicy::Operation,
945 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100946bool GetOptionalInputActivation(const HalOperation& operation,
947 uint32_t inputIndex,
948 ActivationFn& activationFunction,
949 const HalModel& model,
950 const ConversionData& data)
951{
952 if (operation.inputs.size() <= inputIndex)
953 {
954 activationFunction = ActivationFn::kActivationNone;
955 }
956 else
957 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100959 {
960 return Fail("%s: Operation has invalid inputs", __func__);
961 }
962 }
963 return true;
964}
965
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100966template<typename HalPolicy,
967 typename ConvolutionDescriptor,
968 typename HalOperation = typename HalPolicy::Operation,
969 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100970bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
971 uint32_t dilationXIndex,
972 ConvolutionDescriptor& descriptor,
973 const HalModel& model,
974 const ConversionData& data)
975{
976 bool success = true;
977 if (operation.inputs.size() >= dilationXIndex + 2)
978 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100979 success &= GetInputScalar<HalPolicy>(operation,
980 dilationXIndex,
981 HalPolicy::OperandType::INT32,
982 descriptor.m_DilationX,
983 model,
984 data);
985 success &= GetInputScalar<HalPolicy>(operation,
986 dilationXIndex + 1,
987 HalPolicy::OperandType::INT32,
988 descriptor.m_DilationY,
989 model,
990 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100991 }
992
993 return success;
994}
995
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100996template<typename HalPolicy,
997 typename HalOperand = typename HalPolicy::Operand,
998 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100999bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001000 std::vector<int32_t>& outValues,
1001 const HalModel& model,
1002 const ConversionData& data)
1003{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001004 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001005 {
1006 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1007 }
1008
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001009 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001010 if (!startAddress)
1011 {
1012 return Fail("%s: failed to get operand address", __func__, operand.type);
1013 }
1014
1015 // Check number of bytes is sensible
1016 const uint32_t numBytes = operand.location.length;
1017 if (numBytes % sizeof(int32_t) != 0)
1018 {
1019 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1020 __func__, numBytes, sizeof(int32_t));
1021 }
1022
1023 outValues.resize(numBytes / sizeof(int32_t));
1024 memcpy(outValues.data(), startAddress, numBytes);
1025 return true;
1026}
1027
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001028template<typename HalPolicy,
1029 typename HalOperation = typename HalPolicy::Operation,
1030 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001031bool GetInputPaddingScheme(const HalOperation& operation,
1032 uint32_t inputIndex,
1033 PaddingScheme& outPaddingScheme,
1034 const HalModel& model,
1035 const ConversionData& data)
1036{
1037 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001038 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001039 {
1040 return Fail("%s: failed to get padding scheme input value", __func__);
1041 }
1042
1043 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1044 return true;
1045}
1046
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves the operand at the operation's input 'inputIndex' into a LayerInputHandle:
// either an existing output slot (model input / temporary / model output) or a freshly
// added Constant layer (constant copy/reference). Returns an invalid handle on failure.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled by the catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        // Dispatch on where the operand's data comes from.
        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the Constant layer if some backend can actually run it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1159
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001160template<typename HalPolicy,
1161 typename HalOperation = typename HalPolicy::Operation,
1162 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001163bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1164 uint32_t operationOutputIndex,
1165 armnn::IConnectableLayer& layer,
1166 uint32_t layerOutputIndex,
1167 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001168 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001169{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001170 using HalOperand = typename HalPolicy::Operand;
1171
1172 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001173 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1174 {
1175 return false;
1176 }
1177
1178 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1179
1180 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1181 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1182
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001183 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001184
1185 return true;
1186}
1187
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001188template<typename HalPolicy,
1189 typename HalOperation = typename HalPolicy::Operation,
1190 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001191armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1192 uint32_t inputIndex,
1193 const HalModel& model,
1194 ConversionData& data)
1195{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196 using HalOperand = typename HalPolicy::Operand;
1197
1198 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001199 if (!operand)
1200 {
1201 return armnn::DataLayout::NHWC;
1202 }
1203
1204 if (!IsBool(*operand))
1205 {
1206 return armnn::DataLayout::NHWC;
1207 }
1208
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001209 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001210 if (!valueAddress)
1211 {
1212 return armnn::DataLayout::NHWC;
1213 }
1214
1215 if (*(static_cast<const bool*>(valueAddress)))
1216 {
1217 return armnn::DataLayout::NCHW;
1218 }
1219 else
1220 {
1221 return armnn::DataLayout::NHWC;
1222 }
1223}
1224
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001225template<typename HalPolicy,
1226 typename HalOperation = typename HalPolicy::Operation,
1227 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001228bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1229 uint32_t outputIndex,
1230 armnn::IConnectableLayer& layer,
1231 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001232 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001233{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001234 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1235 outputIndex,
1236 layer,
1237 outputIndex,
1238 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001239 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001240}
1241
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001242template<typename HalPolicy,
1243 typename HalOperation = typename HalPolicy::Operation,
1244 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001245bool ConvertToActivation(const HalOperation& operation,
1246 const char* operationName,
1247 const armnn::ActivationDescriptor& activationDesc,
1248 const HalModel& model,
1249 ConversionData& data)
1250{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001251 using HalOperand = typename HalPolicy::Operand;
1252
1253 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001254 if (!input.IsValid())
1255 {
1256 return Fail("%s: Input 0 is invalid", operationName);
1257 }
1258
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001259 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001260 if (!outputOperand)
1261 {
1262 return false;
1263 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001264
1265 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001266 if (IsDynamicTensor(outInfo))
1267 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001268 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001269 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001270
1271 bool isSupported = false;
1272 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1273 IsActivationSupported,
1274 data.m_Backends,
1275 isSupported,
1276 input.GetTensorInfo(),
1277 outInfo,
1278 activationDesc);
1279 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001280 {
1281 return false;
1282 }
1283
1284 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1285 BOOST_ASSERT(layer != nullptr);
1286 input.Connect(layer->GetInputSlot(0));
1287
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001288 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001289}
1290
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001291template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001292 typename HalOperation = typename HalPolicy::Operation,
1293 typename HalModel = typename HalPolicy::Model>
1294bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1295{
1296 armnn::ActivationDescriptor desc;
1297 desc.m_Function = armnn::ActivationFunction::ReLu;
1298
1299 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1300}
1301
1302template<typename HalPolicy,
1303 typename HalOperation = typename HalPolicy::Operation,
1304 typename HalModel = typename HalPolicy::Model>
1305bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1306{
1307 armnn::ActivationDescriptor desc;
1308 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1309 desc.m_A = 1.0f;
1310 desc.m_B = -1.0f;
1311
1312 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1313}
1314
1315template<typename HalPolicy,
1316 typename HalOperation = typename HalPolicy::Operation,
1317 typename HalModel = typename HalPolicy::Model>
1318bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1319{
1320 armnn::ActivationDescriptor desc;
1321 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1322 desc.m_A = 6.0f;
1323
1324 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1325}
1326
1327template<typename HalPolicy,
1328 typename HalOperation = typename HalPolicy::Operation,
1329 typename HalModel = typename HalPolicy::Model>
1330bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1331{
1332 armnn::ActivationDescriptor desc;
1333 desc.m_Function = armnn::ActivationFunction::TanH;
1334 desc.m_A = 1.0f; // android nn does not support tanH parameters
1335 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1336
1337 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1338}
1339
1340template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001341 typename HalOperation = typename HalPolicy::Operation,
1342 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001343bool ConvertPaddings(const HalOperation& operation,
1344 const HalModel& model,
1345 ConversionData& data,
1346 unsigned int rank,
1347 armnn::PadDescriptor& padDescriptor)
1348{
1349 using HalOperand = typename HalPolicy::Operand;
1350
1351 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1352 if (!paddingsOperand)
1353 {
1354 return Fail("%s: Could not read paddings operand", __func__);
1355 }
1356
1357 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1358 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1359 {
1360 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1361 }
1362
1363 std::vector<int32_t> paddings;
1364 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1365
1366 // add padding for each dimension of input tensor.
1367 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1368 {
1369 int paddingBeforeInput = paddings[i];
1370 int paddingAfterInput = paddings[i + 1];
1371
1372 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1373 {
1374 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1375 }
1376
1377 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1378 }
1379
1380 return true;
1381}
1382
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an Android NN 2-D pooling operation (max/avg/L2, selected by poolType) into an
// ArmNN Pooling2d layer plus an optional fused activation. Handles both NN API signatures:
// explicit padding (>= 10 inputs) and implicit padding via a PaddingScheme (7 inputs).
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Default descriptor: floor rounding, NHWC layout (may be overridden below for 1.2 operands).
    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 operands may carry an optional data-layout flag as input 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NOTE(review): indices [2]/[1] assume NHWC ordering (N,H,W,C) at this point; the
        // optional data-layout flag is only read after this padding is computed — confirm
        // the implicit-padding signature never combines with NCHW input.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit padding values from the implicit scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    // Verify at least one backend supports this pooling configuration.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1500
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001501template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001502 typename HalOperation = typename HalPolicy::Operation,
1503 typename HalModel = typename HalPolicy::Model>
1504bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001505{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001506 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001507
1508 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1509 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1510
1511 if (!input0.IsValid() || !input1.IsValid())
1512 {
1513 return Fail("%s: Operation has invalid inputs", __func__);
1514 }
1515
1516 // The FuseActivation parameter is always the input index 2
1517 // and it should be optional
1518 ActivationFn activationFunction;
1519 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1520 {
1521 return Fail("%s: Operation has invalid inputs", __func__);
1522 }
1523
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001524 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001525 if (!outputOperand)
1526 {
1527 return false;
1528 }
1529
1530 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1531 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1532
1533 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1534 if (IsDynamicTensor(outputInfo))
1535 {
1536 return Fail("%s: Dynamic output tensors are not supported", __func__);
1537 }
1538
1539 bool isSupported = false;
1540 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1541 IsAdditionSupported,
1542 data.m_Backends,
1543 isSupported,
1544 inputInfo0,
1545 inputInfo1,
1546 outputInfo);
1547 if (!isSupported)
1548 {
1549 return false;
1550 }
1551
1552 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1553 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1554
1555 if (endLayer != nullptr)
1556 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01001557 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1558 if (!isReshapeSupported)
1559 {
1560 return false;
1561 }
1562
Mike Kelly46272802019-08-14 17:00:48 +01001563 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1564 }
1565 else
1566 {
1567 return Fail("%s: ProcessActivation failed", __func__);
1568 }
1569}
1570
1571template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001572 typename HalOperation = typename HalPolicy::Operation,
1573 typename HalModel = typename HalPolicy::Model>
1574bool ConvertArgMinMax(const HalOperation& operation,
1575 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001576 ConversionData& data,
1577 armnn::ArgMinMaxFunction argMinMaxFunction)
1578{
1579 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1580
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001581 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001582 using HalOperandType = typename HalPolicy::OperandType;
1583
1584 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1585
1586 if (!input0.IsValid())
1587 {
1588 return Fail("%s: Operation has invalid inputs", __func__);
1589 }
1590
1591 int32_t axis;
1592 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1593 {
1594 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1595 }
1596
1597 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1598 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1599
1600 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1601 {
1602 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1603 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1604 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1605 return Fail("%s: Axis must be in range [-n, n)", __func__);
1606 }
1607
1608 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1609 if (!output)
1610 {
1611 return Fail("%s: Could not read output 0", __func__);
1612 }
1613
1614 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1615
1616 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1617 if (IsDynamicTensor(outputInfo))
1618 {
1619 return Fail("%s: Dynamic output tensors are not supported", __func__);
1620 }
1621
1622 armnn::ArgMinMaxDescriptor descriptor;
1623 descriptor.m_Function = argMinMaxFunction;
1624 descriptor.m_Axis = axis;
1625
1626 bool isSupported = false;
1627 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1628 IsArgMinMaxSupported,
1629 data.m_Backends,
1630 isSupported,
1631 inputInfo0,
1632 outputInfo,
1633 descriptor);
1634 if (!isSupported)
1635 {
1636 return false;
1637 }
1638
1639 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1640 assert(layer != nullptr);
1641
1642 input0.Connect(layer->GetInputSlot(0));
1643
1644 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1645}
1646
// Converts an Android NN CONCATENATION operation into an ArmNN Concat layer.
// Pipeline: read axis (last input) -> normalize negative axis -> reshape any
// 1-D/2-D inputs up to 3-D -> permute if the concat axis needs swizzling ->
// concat -> deswizzle -> squeeze the added dimensions back off the output.
// The steps are order-dependent: concatDim and outputShape are adjusted in
// lock-step with the inserted reshape/permute layers.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // Local copies (not references): both are rewritten below as reshapes and
    // permutes are folded in.
    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Tracks whether any input had to be expanded to 3-D, and by how many
    // dimensions, so the final output can be squeezed back symmetrically.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        // CreateDescriptorForConcatenation throws on inconsistent input shapes.
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather the (possibly swizzled) input infos for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
            *data.m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    // Whatever `layer` points at now (concat, deswizzle, or squeeze reshape)
    // provides the operation's output slot.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1884
// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer
// (NHWC data layout) with constant weights/bias and an optionally fused
// activation appended. Supports both the explicit-padding (10 input) and
// implicit-padding (7 input) NNAPI signatures.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Aligns the bias quantization scale with input_scale * weights_scale as
    // required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: pad left/right/top/bottom, strides, then activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding: a padding scheme plus strides; the actual pad
        // values are derived from input/kernel sizes below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel W/H read from weight dims 2/1, input W/H from dims 2/1
        // (assumes NNAPI [O, H, W, I] weights and NHWC input — TODO confirm
        // against the NNAPI CONV_2D spec).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Query backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (identity pass-through when none).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2004
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002005template<typename HalPolicy,
2006 typename HalOperation = typename HalPolicy::Operation,
2007 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002008bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2009{
2010 using HalOperand = typename HalPolicy::Operand;
2011 using HalOperandType = typename HalPolicy::OperandType;
2012
2013 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2014 if (!input.IsValid() )
2015 {
2016 return Fail("%s: Operation has invalid inputs", __func__);
2017 }
2018
2019 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2020 unsigned int rank = inputInfo.GetNumDimensions();
2021 if (rank != 4)
2022 {
2023 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2024 }
2025
2026 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2027 if (!output)
2028 {
2029 return Fail("%s: Could not read output 0", __func__);
2030 }
2031
2032 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2033 if (IsDynamicTensor(outputInfo))
2034 {
2035 return Fail("%s: Dynamic output tensors are not supported", __func__);
2036 }
2037
2038 armnn::DepthToSpaceDescriptor descriptor;
2039
2040 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2041 if (descriptor.m_BlockSize <= 1)
2042 {
2043 return Fail("%s: Block size must be at least 1 in all dimensions");
2044 }
2045
2046 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2047 if (Is12Operand(*output))
2048 {
2049 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2050 }
2051
2052 bool isSupported = false;
2053 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2054 IsDepthToSpaceSupported,
2055 data.m_Backends,
2056 isSupported,
2057 inputInfo,
2058 outputInfo,
2059 descriptor);
2060 if (!isSupported)
2061 {
2062 return false;
2063 }
2064
2065 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2066 assert(layer != nullptr);
2067 input.Connect(layer->GetInputSlot(0));
2068
2069 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2070}
2071
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer (NHWC). Supports both the explicit-padding
// (11 input) and implicit-padding (8 input) NNAPI signatures. The NNAPI
// [1, H, W, I*M] weights are re-shaped and permuted into ArmNN's expected
// [M, I, H, W] ordering before the layer is created.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // Note: the depth multiplier M is derived from the weight and input shapes
    // (dims[3] / input channels) rather than read from its scalar operand.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Aligns the bias quantization scale with input_scale * weights_scale as
    // required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding; the activation lives at index 10. Index 9 is
        // skipped here — presumably the depth multiplier, which is already
        // folded into weightsShape above (TODO confirm against the NNAPI spec).
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding: a padding scheme plus strides; the pad values are
        // computed from input/kernel sizes below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights were permuted to [ M, I, H, W ] above, so W is dim 3 and H is dim 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Query backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (identity pass-through when none).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2216
Mike Kelly3c673942019-07-25 09:26:06 +01002217template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002218 typename HalOperation = typename HalPolicy::Operation,
2219 typename HalModel = typename HalPolicy::Model>
2220bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002221{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002222 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002223
2224 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2225 if (!input.IsValid())
2226 {
2227 return Fail("%s: Operation has invalid input", __func__);
2228 }
2229
Sadik Armagan98c0f662019-11-21 15:54:36 +00002230 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2231 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2232 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2233 {
2234 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2235 }
2236
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002237 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002238 if (!outputOperand)
2239 {
2240 return Fail("%s: Operation has invalid outputs", __func__);
2241 }
2242
2243 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2244 if (IsDynamicTensor(outputInfo))
2245 {
2246 return Fail("%s: Dynamic output tensors are not supported", __func__);
2247 }
2248
2249 bool isSupported = false;
2250 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2251 IsDequantizeSupported,
2252 data.m_Backends,
2253 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002254 inputInfo,
2255 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002256 if (!isSupported)
2257 {
2258 return false;
2259 }
2260
2261 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2262 assert(layer != nullptr);
2263 input.Connect(layer->GetInputSlot(0));
2264
2265 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2266}
2267
2268template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002269 typename HalOperation = typename HalPolicy::Operation,
2270 typename HalModel = typename HalPolicy::Model>
2271bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002272{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002273 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002274
2275 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2276 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2277
2278 if (!input0.IsValid() || !input1.IsValid())
2279 {
2280 return Fail("%s: Operation has invalid inputs", __func__);
2281 }
2282
2283 // The FuseActivation parameter is always the input index 2
2284 // and it should be optional
2285 ActivationFn activationFunction;
2286 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2287 {
2288 return Fail("%s: Operation has invalid inputs", __func__);
2289 }
2290
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002291 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002292 if (!output)
2293 {
2294 return Fail("%s: Could not read output 0", __func__);
2295 }
2296
2297 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2298 if (IsDynamicTensor(outputInfo))
2299 {
2300 return Fail("%s: Dynamic output tensors are not supported", __func__);
2301 }
2302
2303 bool isSupported = false;
2304 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2305 IsDivisionSupported,
2306 data.m_Backends,
2307 isSupported,
2308 input0.GetTensorInfo(),
2309 input1.GetTensorInfo(),
2310 outputInfo);
2311 if (!isSupported)
2312 {
2313 return false;
2314 }
2315
2316 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2317 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2318
2319 if (endLayer)
2320 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002321 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2322 if (!isReshapeSupported)
2323 {
2324 return false;
2325 }
2326
Mike Kelly46272802019-08-14 17:00:48 +01002327 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2328 }
2329 return Fail("%s: ProcessActivation failed", __func__);
2330}
2331
2332template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002333 typename HalOperation = typename HalPolicy::Operation,
2334 typename HalModel = typename HalPolicy::Model>
2335bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002336{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002337 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002338
2339 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2340 if (!input.IsValid())
2341 {
2342 return Fail("%s: Operation has invalid inputs", __func__);
2343 }
2344
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002345 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002346 if (!outputOperand)
2347 {
2348 return Fail("%s: Operation has invalid outputs", __func__);
2349 }
2350
2351 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2352 if (IsDynamicTensor(outputInfo))
2353 {
2354 return Fail("%s: Dynamic output tensors are not supported", __func__);
2355 }
2356
2357 bool isSupported = false;
2358 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2359 IsFloorSupported,
2360 data.m_Backends,
2361 isSupported,
2362 input.GetTensorInfo(),
2363 outputInfo);
2364 if (!isSupported)
2365 {
2366 return false;
2367 }
2368
2369 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2370 assert(layer != nullptr);
2371 input.Connect(layer->GetInputSlot(0));
2372
2373 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2374}
2375
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a V1_0 operand is never QSymm8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2380
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 introduced TENSOR_QUANT8_SYMM; report whether this operand uses it.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2389
// Outcome of DequantizeIfRequired (defined below).
enum class DequantizeStatus
{
    SUCCESS,         // weights were dequantized into a freshly allocated float buffer
    NOT_REQUIRED,    // weights are already constant, or no matching DEQUANTIZE producer was found
    INVALID_OPERAND  // the weights operand could not be read from the model
};

// { dequantized float buffer, buffer size in bytes (element count * sizeof(float)),
//   tensor info describing the buffer, status }
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2398
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002399template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002400 typename HalOperation = typename HalPolicy::Operation,
2401 typename HalModel = typename HalPolicy::Model>
2402DequantizeResult DequantizeIfRequired(size_t operand_index,
2403 const HalOperation& operation,
2404 const HalModel& model,
2405 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002406{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002407 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002408
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002409 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002410 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002411 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002412 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002413 }
2414
2415 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2416 {
2417 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002418 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002419 }
2420
2421 const size_t weightsInputIndex = operation.inputs[operand_index];
2422
2423 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2424 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2425 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2426 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002427 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002428 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002429 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2430 {
2431 continue;
2432 }
2433
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002434 size_t outOpIndex = weightsInputIndex + 1;
2435 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002436 {
2437 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002438 }
2439
2440 if (outOpIndex != weightsInputIndex)
2441 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002442 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002443 }
2444
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002445 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002446 BOOST_ASSERT(operand);
2447
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002448 if (!IsQSymm8(*operand))
2449 {
2450 // Only supporting dequantize from QSYMM8 to FLOAT
2451 break;
2452 }
2453
2454 // Allocate a new buffer for the dequantized data and manually dequantize
2455 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2456 if (!startValue)
2457 {
2458 // Failed to get the operand address
2459 break;
2460 }
2461
2462 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2463 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002464 const float quantizationScale = operand->scale;
2465
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002466 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2467 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2468 {
2469 float* dstPtr = dequantizedBuffer.get();
2470 BOOST_ASSERT(dstPtr);
2471 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2472 }
2473
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002474 // Construct tensor info for dequantized ConstTensor
2475 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2476 operand->dimensions.data(),
2477 armnn::DataType::Float32);
2478
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002479 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2480 std::move(tensorInfo),
2481 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002482 }
2483
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002484 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002485}
2486
2487template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002488 typename HalOperation = typename HalPolicy::Operation,
2489 typename HalModel = typename HalPolicy::Model>
2490ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2491 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002492 const ConversionData& data,
2493 size_t operandIndex,
2494 bool optional = false)
2495{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002496 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2497
2498 DequantizeStatus status = std::get<3>(dequantized);
2499 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002500 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002501 case DequantizeStatus::INVALID_OPERAND:
2502 {
2503 // return invalid const tensor pin
2504 return ConstTensorPin();
2505 }
2506 case DequantizeStatus::NOT_REQUIRED:
2507 {
2508 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2509 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2510 }
2511 case DequantizeStatus::SUCCESS:
2512 default:
2513 {
2514 return ConstTensorPin(
2515 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2516 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002517 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002518}
2519
2520
// Converts an ANEURALNETWORKS_FULLY_CONNECTED operation to an ArmNN FullyConnected layer.
// Inputs:  0 = input tensor, 1 = weights (possibly fed by a DEQUANTIZE op),
//          2 = 1D bias tensor, 3 = fused activation function.
// Output:  0 = result tensor. Returns false (via Fail) on any invalid/unsupported case.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be produced by a DEQUANTIZE op; DequantizeAndMakeConstTensorPin
    // resolves that case into a constant float buffer.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FullyConnected needs a 2D input: compute the flattened shape.
    // FlattenFullyConnectedInput throws on incompatible shapes.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // Inputs of rank > 2 are flattened to 2D via an inserted Reshape layer.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2631
2632template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002633 typename HalOperation = typename HalPolicy::Operation,
2634 typename HalModel = typename HalPolicy::Model>
2635bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002636{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002637 using HalOperand = typename HalPolicy::Operand;
2638
Mike Kelly999e2092019-08-15 10:46:46 +01002639 if (operation.inputs.size() != 1)
2640 {
2641 return Fail("%s: Optional inputs are not supported", __func__);
2642 }
2643
Mike Kelly46272802019-08-14 17:00:48 +01002644 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2645 if (!input.IsValid())
2646 {
2647 return Fail("%s: Operation has invalid inputs", __func__);
2648 }
2649
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002650 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002651 if (!output)
2652 {
2653 return Fail("%s: Could not read output 0", __func__);
2654 }
2655
2656 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2657 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2658
2659 if (IsDynamicTensor(outputInfo))
2660 {
2661 return Fail("%s: Dynamic output tensors are not supported", __func__);
2662 }
2663 if (outputInfo.GetNumDimensions() != 4u)
2664 {
2665 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2666 }
2667
2668 armnn::L2NormalizationDescriptor desc;
2669 desc.m_DataLayout = armnn::DataLayout::NHWC;
2670
2671 bool isSupported = false;
2672 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2673 IsL2NormalizationSupported,
2674 data.m_Backends,
2675 isSupported,
2676 inputInfo,
2677 outputInfo,
2678 desc);
2679 if (!isSupported)
2680 {
2681 return false;
2682 }
2683
2684 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2685 assert(layer != nullptr);
2686 input.Connect(layer->GetInputSlot(0));
2687
2688 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2689}
2690
2691template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002692 typename HalOperation = typename HalPolicy::Operation,
2693 typename HalModel = typename HalPolicy::Model>
2694bool ConvertLocalResponseNormalization(const HalOperation& operation,
2695 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01002696 ConversionData& data)
2697{
Mike Kelly999e2092019-08-15 10:46:46 +01002698 if (operation.inputs.size() != 5)
2699 {
2700 return Fail("%s: Optional inputs are not supported", __func__);
2701 }
2702
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002703 using HalOperand = typename HalPolicy::Operand;
2704 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01002705
2706 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2707 if (!input.IsValid())
2708 {
2709 return Fail("%s: Operation has invalid inputs", __func__);
2710 }
2711
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002712 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002713 if (!output)
2714 {
2715 return Fail("%s: Could not read output 0", __func__);
2716 }
2717
2718 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2719 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2720
2721 if (IsDynamicTensor(outputInfo))
2722 {
2723 return Fail("%s: Dynamic output tensors are not supported", __func__);
2724 }
2725 if (outputInfo.GetNumDimensions() != 4u)
2726 {
2727 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2728 }
2729
2730 armnn::NormalizationDescriptor descriptor;
2731 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2732 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2733 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2734
2735 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002736 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01002737 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2738 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2739 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2740 {
2741 return Fail("%s: Operation has invalid inputs", __func__);
2742 }
2743
2744 // ArmNN expects normSize to be the full size of the normalization
2745 // window rather than the radius as in AndroidNN.
2746 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2747
2748 bool isSupported = false;
2749 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2750 IsNormalizationSupported,
2751 data.m_Backends,
2752 isSupported,
2753 inputInfo,
2754 outputInfo,
2755 descriptor);
2756 if (!isSupported)
2757 {
2758 return false;
2759 }
2760
2761
2762 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2763 assert(layer != nullptr);
2764 input.Connect(layer->GetInputSlot(0));
2765
2766 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2767}
2768
2769template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002770 typename HalOperation = typename HalPolicy::Operation,
2771 typename HalModel = typename HalPolicy::Model>
2772bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002773{
Mike Kelly46272802019-08-14 17:00:48 +01002774 armnn::ActivationDescriptor desc;
2775 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2776
2777 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2778}
2779
// Converts an ANEURALNETWORKS_MEAN operation to an ArmNN Mean layer.
// Inputs: 0 = tensor, 1 = 1D tensor of reduction axes, 2 = keepDims flag (INT32).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // Negative axes are wrapped into [0, rank); note this assumes each axis value is
    // within [-rank, rank) — TODO confirm against the NNAPI spec guarantees.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2856
2857template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002858 typename HalOperation = typename HalPolicy::Operation,
2859 typename HalModel = typename HalPolicy::Model>
2860bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002861{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002862 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002863
2864 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2865 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2866
2867 if (!input0.IsValid() || !input1.IsValid())
2868 {
2869 return Fail("%s: Operation has invalid inputs", __func__);
2870 }
2871
2872 // The FuseActivation parameter is always the input index 2
2873 // and it should be optional
2874 ActivationFn activationFunction;
2875 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2876 {
2877 return Fail("%s: Operation has invalid inputs", __func__);
2878 }
2879
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002880 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002881
2882 if (outputOperand == nullptr)
2883 {
2884 return false;
2885 }
2886
2887 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2888 if (IsDynamicTensor(outputInfo))
2889 {
2890 return Fail("%s: Dynamic output tensors are not supported", __func__);
2891 }
2892
2893 bool isSupported = false;
2894 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2895 IsMultiplicationSupported,
2896 data.m_Backends,
2897 isSupported,
2898 input0.GetTensorInfo(),
2899 input1.GetTensorInfo(),
2900 outputInfo);
2901 if (!isSupported)
2902 {
2903 return false;
2904 }
2905
2906 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2907 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2908
2909 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2910 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2911
2912 if (endLayer != nullptr)
2913 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002914 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2915 if (!isReshapeSupported)
2916 {
2917 return false;
2918 }
2919
Mike Kelly46272802019-08-14 17:00:48 +01002920 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2921 }
2922 else
2923 {
2924 return Fail("%s: ProcessActivation failed", __func__);
2925 }
2926}
2927
2928template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002929 typename HalOperation = typename HalPolicy::Operation,
2930 typename HalModel = typename HalPolicy::Model>
2931bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002932{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002933 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002934
Mike Kelly3c673942019-07-25 09:26:06 +01002935 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2936 if (!input.IsValid())
2937 {
2938 return Fail("%s: Operation has invalid inputs", __func__);
2939 }
2940
2941 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2942 unsigned int rank = inputInfo.GetNumDimensions();
2943
2944 armnn::PadDescriptor descriptor;
2945 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2946 {
2947 return Fail("%s: Could not convert paddings", __func__);
2948 }
2949
2950 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2951 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2952 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2953 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2954 {
2955 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2956 }
2957
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002958 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002959 if (!output)
2960 {
2961 return Fail("%s: Could not read output", __func__);
2962 }
2963
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002964 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002965 if (IsDynamicTensor(outputInfo))
2966 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002967 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002968 }
2969
2970 bool isSupported = false;
2971 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2972 IsPadSupported,
2973 data.m_Backends,
2974 isSupported,
2975 inputInfo,
2976 outputInfo,
2977 descriptor);
2978 if (!isSupported)
2979 {
2980 return false;
2981 }
2982
2983 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2984 assert(layer != nullptr);
2985 input.Connect(layer->GetInputSlot(0));
2986 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2987
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002988 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002989}
2990
// Converts an ANEURALNETWORKS_RESHAPE operation to an ArmNN Reshape layer.
// Inputs: 0 = tensor, 1 = 1D INT32 tensor of target dimensions (may contain -1,
// which reshapePrepare resolves). The resolved shape must match the declared output.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* inputOperand          = GetInputOperand<HalPolicy>(operation, 0, model);
    const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const HalOperand* outputOperand         = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    // The resolved shape must agree with the shape declared on the output operand.
    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
3065
3066template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003067 typename HalOperation = typename HalPolicy::Operation,
3068 typename HalModel = typename HalPolicy::Model>
3069bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003070{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003071 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003072
Mike Kelly0a879362019-07-29 16:56:31 +01003073 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3074 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3075
3076 if (!input0.IsValid() || !input1.IsValid())
3077 {
3078 return Fail("%s: Operation has invalid inputs", __func__);
3079 }
3080
3081 // The FuseActivation parameter is always the input index 2
3082 // and it should be optional
3083 ActivationFn activationFunction;
3084 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3085 {
3086 return Fail("%s: Operation has invalid inputs", __func__);
3087 }
3088
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003089 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003090 if (!output)
3091 {
3092 return Fail("%s: Could not read output 0", __func__);
3093 }
3094
3095 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3096 if (IsDynamicTensor(outputInfo))
3097 {
3098 return Fail("%s: Dynamic output tensors are not supported", __func__);
3099 }
3100
3101 bool isSupported = false;
3102 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3103 IsSubtractionSupported,
3104 data.m_Backends,
3105 isSupported,
3106 input0.GetTensorInfo(),
3107 input1.GetTensorInfo(),
3108 outputInfo);
3109 if (!isSupported)
3110 {
3111 return false;
3112 }
3113
3114 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3115 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3116
3117 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3118 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3119
3120 if (endLayer)
3121 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01003122 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3123 if (!isReshapeSupported)
3124 {
3125 return false;
3126 }
Mike Kelly0a879362019-07-29 16:56:31 +01003127 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3128 }
3129
3130 return Fail("%s: ProcessActivation failed", __func__);
3131}
3132
Finn Williams23b87b32019-07-30 11:44:05 +01003133template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003134 typename HalOperation = typename HalPolicy::Operation,
3135 typename HalModel = typename HalPolicy::Model>
3136bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003137{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003138 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003139
3140 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3141 if (!input.IsValid())
3142 {
3143 return Fail("%s: Operation has invalid inputs", __func__);
3144 }
3145
3146 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3147 unsigned int rank = inputInfo.GetNumDimensions();
3148 if (rank > 4)
3149 {
3150 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3151 }
3152
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003153 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003154 if (!output)
3155 {
3156 return Fail("%s: Could not read output 0", __func__);
3157 }
3158
3159 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3160 {
3161 return Fail("%s: Dynamic output tensors are not supported", __func__);
3162 }
3163
3164 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3165 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003166 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003167
3168 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3169
3170 std::vector<int32_t> axis;
3171 if (!axisOperand)
3172 {
3173 axis.assign(dimensionSequence,
3174 dimensionSequence + rank);
3175 }
3176 else
3177 {
3178 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3179 }
3180
3181 std::vector<uint32_t> outputDims;
3182 for (unsigned int i = 0; i < rank; i++)
3183 {
3184 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3185 auto currentDimension = inputInfo.GetShape()[i];
3186 if (skipSqueeze || currentDimension != 1)
3187 {
3188 outputDims.push_back(currentDimension);
3189 }
3190 }
3191
3192 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3193
3194 armnn::TensorInfo outputInfo = inputInfo;
3195 outputInfo.SetShape(outShape);
3196
3197 armnn::ReshapeDescriptor reshapeDesc;
3198 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3199
3200 bool isSupported = false;
3201 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3202 IsReshapeSupported,
3203 data.m_Backends,
3204 isSupported,
3205 inputInfo,
3206 reshapeDesc);
3207 if (!isSupported)
3208 {
3209 return false;
3210 }
3211
3212 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3213 assert(layer != nullptr);
3214 input.Connect(layer->GetInputSlot(0));
3215
3216 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3217}
3218
3219template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003220 typename HalOperation = typename HalPolicy::Operation,
3221 typename HalModel = typename HalPolicy::Model>
3222bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003223{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003224 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003225
3226 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3227 if (!input.IsValid())
3228 {
3229 return Fail("%s: Operation has invalid inputs", __func__);
3230 }
3231
3232 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3233 unsigned int rank = inputInfo.GetNumDimensions();
3234 if (rank > 4)
3235 {
3236 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3237 }
3238
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003239 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003240 if (!output)
3241 {
3242 return Fail("%s: Could not read output 0", __func__);
3243 }
3244
3245 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3246 if (IsDynamicTensor(outputInfo))
3247 {
3248 return Fail("%s: Dynamic output tensors are not supported", __func__);
3249 }
3250
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003251 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3252 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3253 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003254
3255 std::vector<int32_t> beginValues;
3256 std::vector<int32_t> endValues;
3257 std::vector<int32_t> stridesValues;
3258
3259 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003260 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003261 {
3262 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3263 {
3264 return false;
3265 }
3266
3267 if (operandValues.size() != rank)
3268 {
3269 return false;
3270 }
3271
3272 return true;
3273 };
3274
3275 if (!ValidateInputOperands(*beginOperand, beginValues)
3276 || !ValidateInputOperands(*endOperand, endValues)
3277 || !ValidateInputOperands(*stridesOperand, stridesValues))
3278 {
3279 return Fail("%s: Operation has invalid input operand", __func__);
3280 }
3281
3282 // Stride cannot have value '0'
3283 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3284 {
3285 return Fail("%s: Stride must be non-zero value.", __func__);
3286 }
3287
3288 armnn::StridedSliceDescriptor descriptor;
3289 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3290 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3291 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3292 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3293
3294 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3295 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3296 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3297 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3298 {
3299 return Fail("%s: Operation has invalid inputs", __func__);
3300 }
3301
3302 bool isSupported = false;
3303 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3304 IsStridedSliceSupported,
3305 data.m_Backends,
3306 isSupported,
3307 inputInfo,
3308 outputInfo,
3309 descriptor);
3310 if (!isSupported)
3311 {
3312 return false;
3313 }
3314
3315 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3316 assert(layer != nullptr);
3317 input.Connect(layer->GetInputSlot(0));
3318
3319 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3320}
3321
3322template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003323 typename HalOperation = typename HalPolicy::Operation,
3324 typename HalModel = typename HalPolicy::Model>
3325bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003326{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003327 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003328
3329 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3330 if (!input.IsValid())
3331 {
3332 return Fail("%s: Operation has invalid inputs", __func__);
3333 }
3334
3335 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3336 unsigned int rank = inputInfo.GetNumDimensions();
3337 if (rank > 4)
3338 {
3339 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3340 }
3341
3342 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3343 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003344 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003345
3346 std::vector<int32_t> perm(rank);
3347 if (!permOperand)
3348 {
3349 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3350 for (unsigned int i = rank; i > 0; i--)
3351 {
3352 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3353 }
3354 }
3355 else
3356 {
3357 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3358 }
3359
3360 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3361
3362 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3363 if (!permutationVector.IsEqual(NHWCToArmNN)
3364 && !permutationVector.IsEqual(ArmNNToNHWC)
3365 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3366 {
3367 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3368 }
3369
3370 armnn::PermuteDescriptor permuteDesc;
3371 permuteDesc.m_DimMappings = permutationVector;
3372
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003373 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003374 if (!output)
3375 {
3376 return Fail("%s: Could not read output 0", __func__);
3377 }
3378
3379 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3380
3381 bool isSupported = false;
3382 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3383 IsPermuteSupported,
3384 data.m_Backends,
3385 isSupported,
3386 inputInfo,
3387 outputInfo,
3388 permuteDesc);
3389 if (!isSupported)
3390 {
3391 return false;
3392 }
3393
3394 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3395 assert(layer != nullptr);
3396 input.Connect(layer->GetInputSlot(0));
3397
3398 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3399}
3400
3401template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003402 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003403 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003404 typename HalModel = typename HalPolicy::Model>
3405bool ConvertBatchToSpaceNd(const HalOperation& operation,
3406 const HalModel& model,
3407 ConversionData& data)
3408{
Finn Williams23b87b32019-07-30 11:44:05 +01003409
3410 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3411 if (!input.IsValid())
3412 {
3413 return Fail("%s: Operation has invalid inputs", __func__);
3414 }
3415
3416 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3417 if (!output)
3418 {
3419 return Fail("%s: Could not read output 0", __func__);
3420 }
3421
3422 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3423 if (IsDynamicTensor(outputInfo))
3424 {
3425 return Fail("%s: Dynamic output tensors are not supported", __func__);
3426 }
3427
3428 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3429 if (!blockOperand)
3430 {
3431 return Fail("%s: Could not read input 1", __func__);
3432 }
3433
3434 // Convert the block operand to int32
3435 std::vector<int32_t> block;
3436 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3437 {
3438 return Fail("%s: Input 1 has invalid values", __func__);
3439 }
3440
3441 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3442
3443 unsigned int rank = inputInfo.GetNumDimensions();
3444 if (rank != 4)
3445 {
3446 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3447 }
3448
3449 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3450 {
3451 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3452 " greater than or equal to 1", __func__);
3453 }
3454
3455 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3456 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3457 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3458
3459 if (Is12Operand(*output))
3460 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003461 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003462 }
3463 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3464 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3465
3466 bool isSupported = false;
3467 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3468 IsBatchToSpaceNdSupported,
3469 data.m_Backends,
3470 isSupported,
3471 inputInfo,
3472 outputInfo,
3473 batchToSpaceNdDesc);
3474 if (!isSupported)
3475 {
3476 return false;
3477 }
3478
3479 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3480 assert(layer != nullptr);
3481 input.Connect(layer->GetInputSlot(0));
3482
3483 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3484}
Mike Kelly0a879362019-07-29 16:56:31 +01003485
Finn Williamsd74c5052019-07-30 17:06:00 +01003486template<typename HalPolicy,
3487 typename HalOperation = typename HalPolicy::Operation,
3488 typename HalOperand = typename HalPolicy::Operand,
3489 typename HalModel = typename HalPolicy::Model>
3490bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3491{
3492 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3493 if (!input.IsValid())
3494 {
3495 return Fail("%s: Operation has invalid inputs", __func__);
3496 }
3497
3498 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3499 unsigned int rank = inputInfo.GetNumDimensions();
3500 unsigned int spatialDim = rank - 2;
3501
3502 if (rank != 4)
3503 {
3504 Fail("%s: Only inputs with rank 4 are supported", __func__);
3505 }
3506
3507 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3508 if (!output)
3509 {
3510 return Fail("%s: Could not read output 0", __func__);
3511 }
3512
3513 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3514 if (IsDynamicTensor(outputInfo))
3515 {
3516 return Fail("%s: Dynamic output tensors are not supported", __func__);
3517 }
3518
3519 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3520 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3521
3522 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3523 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3524 {
3525 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3526 }
3527
3528 std::vector<int32_t> blockShape;
3529 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3530 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3531 {
3532 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3533 }
3534
3535 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3536 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3537 {
3538 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3539 }
3540
3541 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3542 std::vector<int32_t> paddings;
3543 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3544 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3545 {
3546 int paddingBeforeInput = paddings[i];
3547 int paddingAfterInput = paddings[i + 1];
3548 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3549 {
3550 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3551 }
3552
3553 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3554 }
3555
3556 armnn::SpaceToBatchNdDescriptor descriptor;
3557 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3558 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3559 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3560
3561 if (Is12Operand(*output))
3562 {
3563 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3564 }
3565
3566 bool isSupported = false;
3567 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3568 IsSpaceToBatchNdSupported,
3569 data.m_Backends,
3570 isSupported,
3571 inputInfo,
3572 outputInfo,
3573 descriptor);
3574 if (!isSupported)
3575 {
3576 return false;
3577 }
3578
3579 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3580 assert(layer != nullptr);
3581 input.Connect(layer->GetInputSlot(0));
3582
3583 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3584}
3585
Kevin May407718f2019-09-09 14:46:41 +01003586template<typename HalPolicy,
3587 typename HalOperation = typename HalPolicy::Operation,
3588 typename HalModel = typename HalPolicy::Model>
3589bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3590{
3591 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3592
3593 if (!input.IsValid())
3594 {
3595 return Fail("%s: Operation has invalid input", __func__);
3596 }
3597
3598 using HalOperand = typename HalPolicy::Operand;
3599 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3600 if (!output)
3601 {
3602 return Fail("%s: Could not read output 0", __func__);
3603 }
3604
3605 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3606 if (IsDynamicTensor(outputInfo))
3607 {
3608 return Fail("%s: Dynamic output tensors are not supported", __func__);
3609 }
3610
3611 bool isSupported = false;
3612 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3613 IsAbsSupported,
3614 data.m_Backends,
3615 isSupported,
3616 input.GetTensorInfo(),
3617 outputInfo);
3618
3619 if (!isSupported)
3620 {
3621 return false;
3622 }
3623
3624 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3625 assert(layer != nullptr);
3626 input.Connect(layer->GetInputSlot(0));
3627
3628 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3629}
3630
3631
saoste01b8471482018-10-10 09:44:51 +01003632} // namespace armnn_driver