blob: b3f1ac722c0c824636eb4b33886c85840eea5cde [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
15#include <armnnUtils/Permute.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
38struct ConversionData
39{
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010040 ConversionData(const std::vector<armnn::BackendId>& backends)
41 : m_Backends(backends)
42 , m_Network(nullptr, nullptr)
arovir01b0717b52018-09-05 17:03:25 +010043 {}
44
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010045 const std::vector<armnn::BackendId> m_Backends;
arovir01b0717b52018-09-05 17:03:25 +010046 armnn::INetworkPtr m_Network;
47 std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
48 std::vector<android::nn::RunTimePoolInfo> m_MemPools;
49};
50
51class LayerInputHandle
52{
53public:
54 LayerInputHandle();
55 LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);
56
57 bool IsValid() const;
58
59 void Connect(armnn::IInputSlot& inputSlot);
60
61 const armnn::TensorInfo& GetTensorInfo() const;
62
63private:
64 armnn::IOutputSlot* m_OutputSlot;
65 bool m_Valid;
66 armnn::TensorInfo m_TensorInfo;
67};
68
69class ConstTensorPin
70{
71public:
72 // Creates an invalid tensor pin (can be used to signal errors)
73 // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
74 ConstTensorPin(bool optional = false);
75
76 // @param tensorInfo TensorInfo associated with the tensor.
77 // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
78 // the model being converted.
79 // @param numBytes Number of bytes for the tensor data.
80 ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
81 const armnn::PermutationVector& mappings);
82
83 ConstTensorPin(const ConstTensorPin& other) = delete;
84 ConstTensorPin(ConstTensorPin&& other) = default;
85
86 bool IsValid() const;
87 bool IsOptional() const;
88
89 const armnn::ConstTensor& GetConstTensor() const;
90 const armnn::ConstTensor* GetConstTensorPtr() const;
91
92private:
93 armnn::ConstTensor m_ConstTensor;
94
95 // Owned memory for swizzled tensor data, only required if the tensor needed
96 // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
97 // the pools associated with the model being converted.
98 std::vector<uint8_t> m_SwizzledTensorData;
99
100 // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
101 bool m_Optional;
102};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log (via ALOGD) the reason for failing to convert a model.
// @return Always false, so callers can write `return Fail(...)` to signal an error.
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Iterates over the given backends in preference order and sets `supported` to true for the first backend
// that accepts the layer; logs (but does not fail on) unregistered backends.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000169template<typename HalOperand>
170armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
198
199inline bool IsBool(V1_0::Operand)
200{
201 return false;
202}
203
Sadik Armagan61113162019-07-25 09:09:40 +0100204inline bool Is12Operand(V1_0::Operand)
205{
206 return false;
207}
208
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 overload: true only for operands of type BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1.2 Operand (HAL 1.2 overload: always true).
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
223
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100224template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000225armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
226 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100227 armnn::TensorInfo reshapeInfo)
228{
229 armnn::ReshapeDescriptor reshapeDescriptor;
230 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
231
232 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
233 BOOST_ASSERT(reshapeLayer != nullptr);
234
235 // Attach the input layer to the reshape layer
236 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
237 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
238
239 return *reshapeLayer;
240}
241
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000242bool BroadcastTensor(LayerInputHandle& input0,
243 LayerInputHandle& input1,
Kevin Mayaed08ac2019-12-12 16:33:31 +0000244 const armnn::TensorInfo& outputInfo,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000245 armnn::IConnectableLayer* startLayer,
246 ConversionData& data)
arovir01b0717b52018-09-05 17:03:25 +0100247{
248 BOOST_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100249
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100250 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
251 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
252
253 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
254 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
255
256 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100257 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100258 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
259 input0.Connect(startLayer->GetInputSlot(0));
260 input1.Connect(startLayer->GetInputSlot(1));
261
Sadik Armagan64b19b52019-08-19 09:49:58 +0100262 return true;
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100263 }
264
265 // Since the number of dimensions do not match then we need to add degenerate dimensions
266 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
267
268 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
269 unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
270 boost::numeric_cast<int>(inputDimensions1));
271
272 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
273 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
274 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
275
276 const armnn::TensorShape& smallShape = smallInfo.GetShape();
277 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
278 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
279 {
280 reshapedDimensions[i] = smallShape[i - sizeDifference];
281 }
282
283 armnn::TensorInfo reshapedInfo = smallInfo;
284 reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
285 reshapedDimensions.data() });
Sadik Armagan64b19b52019-08-19 09:49:58 +0100286
287 // RehsapeDescriptor that is ignored in the IsReshapeSupported function
288 armnn::ReshapeDescriptor reshapeDescriptor;
289
290 bool isSupported = false;
291 FORWARD_LAYER_SUPPORT_FUNC(__func__,
292 IsReshapeSupported,
293 data.m_Backends,
294 isSupported,
295 reshapedInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +0000296 outputInfo,
Sadik Armagan64b19b52019-08-19 09:49:58 +0100297 reshapeDescriptor);
298 if (!isSupported)
299 {
300 return false;
301 }
302
303 BOOST_ASSERT(data.m_Network != nullptr);
304 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100305
306 if (input0IsSmaller)
307 {
308 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
309 //
310 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100311 // | |
312 // Reshape |
313 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100314 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100315
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100316 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
317 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100318 }
319 else
320 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100321 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
322 //
323 // Input0 Input1
324 // | |
325 // | Reshape
326 // \ /
327 // StartLayer
328
arovir01b0717b52018-09-05 17:03:25 +0100329 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100330 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100331 }
Sadik Armagan64b19b52019-08-19 09:49:58 +0100332
333 return true;
arovir01b0717b52018-09-05 17:03:25 +0100334}
335
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000336void CalcPadding(uint32_t input,
337 uint32_t kernel,
338 uint32_t stride,
339 uint32_t& outPadHead,
340 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100341 android::nn::PaddingScheme scheme)
342{
343 int32_t padHead;
344 int32_t padTail;
345 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
346 outPadHead = boost::numeric_cast<uint32_t>(padHead);
347 outPadTail = boost::numeric_cast<uint32_t>(padTail);
348}
349
#ifdef ARMNN_ANDROID_NN_V1_2

/// HAL 1.2 overload: padding calculation that also accounts for kernel dilation.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

/// Padding calculation for transpose convolution, driven by the OUTPUT size.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
369
Matthew Bentham912b3622019-05-03 15:49:14 +0100370Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100371{
372 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100373 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100374 shape.dimensions = operand.dimensions;
375 shape.scale = operand.scale;
376 shape.offset = operand.zeroPoint;
377 return shape;
378}
379
#ifdef ARMNN_ANDROID_NN_V1_2

/// Converts a HAL 1.2 operand's metadata into an android::nn::Shape.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type       = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale      = operand.scale;
    shape.offset     = operand.zeroPoint;
    return shape;
}

#endif
393
arovir01b0717b52018-09-05 17:03:25 +0100394// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
395// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
Aron Virginas-Tara0baa172019-08-01 11:24:08 +0100396// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
397// user (us, in this case) to ensure they match.
arovir01b0717b52018-09-05 17:03:25 +0100398void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000399 const armnn::TensorInfo& weightInfo,
400 const armnn::TensorInfo& inputInfo)
arovir01b0717b52018-09-05 17:03:25 +0100401{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000402 if (weightInfo.HasPerAxisQuantization())
arovir01b0717b52018-09-05 17:03:25 +0100403 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000404 // NOTE: Bias scale is always set to 0 for per-axis quantization and
405 // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
406 auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
arovir01b0717b52018-09-05 17:03:25 +0100407 {
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000408 return biasScale * inputInfo.GetQuantizationScale();
409 };
410
411 std::vector<float> biasScales(weightInfo.GetQuantizationScales());
412 std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
413
414 biasInfo.SetQuantizationScales(biasScales);
415 biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());
416
417 ALOGV("Bias quantization params have been updated for per-axis quantization");
418 }
419 else
420 {
421 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
422 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
423 {
424 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
425 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
426 {
427 ALOGW("Bias quantization scale has been modified to match input * weights");
428 biasInfo.SetQuantizationScale(expectedBiasScale);
429 }
arovir01b0717b52018-09-05 17:03:25 +0100430 }
431 }
432}
433
434// 4D Tensor Permutations
435const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
436const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
437const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
438const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });
439
440// 3D Permutation Vectors
441const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
442const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
443const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
444
445template<typename OSlot>
446armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
447 const armnn::PermutationVector& mappings)
448{
449 // Add swizzle layer
450 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
451
452 BOOST_ASSERT(layer != nullptr);
453
454 // Connect input to swizzle layer
455 input.Connect(layer->GetInputSlot(0));
456
457 // Setup swizzled output
458 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
459 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
460
461 return *layer;
462}
463
464void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
465{
466 // Add swizzle layer
467 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
468 // Connect swizzled input to layer
469 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
470}
471
472armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
473{
474 // Add deswizzle layer
475 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
476 return deswizzleLayer;
477}
478
479// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
480armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
481 LayerInputHandle& input,
482 armnn::IConnectableLayer& firstLayer,
483 armnn::IConnectableLayer& lastLayer)
484{
485 SwizzleIn(network, input, firstLayer, 0);
486 return DeswizzleOut(network, lastLayer, 0);
487}
488
489// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
490armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
491 armnn::IConnectableLayer& layer)
492{
493 return SwizzleInDeswizzleOut(network, input, layer, layer);
494}
495
496bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
497 const armnn::TensorShape & outputShape,
498 uint32_t concatDim)
499{
500 // Validate the output shape is correct given the input shapes (which have just been validated)
501 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
502 if (outputShape.GetNumDimensions() != numDimensions)
503 {
504 return Fail("%s: Output shape has wrong number of dimensions", __func__);
505 }
506
507 unsigned int outputSizeAlongConcatenatedDimension = 0;
508 for (unsigned int i = 0; i < inputShapes.size(); i++)
509 {
510 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
511 }
512
513 for (unsigned int i = 0; i < numDimensions; ++i)
514 {
515 if (i == concatDim)
516 {
517 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
518 {
519 return Fail(
520 "%s: Invalid output shape for dimension %d (%d != %d)",
521 __func__,
522 i,
523 outputShape[i],
524 outputSizeAlongConcatenatedDimension);
525 }
526 }
527 else
528 {
529 if (outputShape[i] != inputShapes[0][i])
530 {
531 return Fail("%s: Invalid output shape", __func__);
532 }
533 }
534 }
535
536 return true;
537}
538
539bool RequiresReshape(armnn::TensorShape & inputShape)
540{
541 return inputShape.GetNumDimensions() < 3;
542}
543
arovir01b0717b52018-09-05 17:03:25 +0100544void SwizzleInputs(armnn::INetwork& network,
545 std::vector<LayerInputHandle>& inputs,
546 std::vector<armnn::TensorShape>& inputShapes,
547 const armnn::PermutationVector& mapping)
548{
549 if (!mapping.IsEqual(IdentityPermutation4D))
550 {
551 size_t nInputs = inputs.size();
552 for (size_t i=0; i<nInputs; ++i)
553 {
554 // add swizzle layer
555 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
556 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
557 auto& outputInfo = outputSlot.GetTensorInfo();
558 // replace inputs with the swizzled ones
559 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
560 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
561 }
562 }
563}
564
Kevin Mayaed08ac2019-12-12 16:33:31 +0000565bool CheckReshapeSupported(ConversionData& data,
566 std::vector<LayerInputHandle>& inputs,
567 std::vector<armnn::TensorShape>& inputShapes,
568 const armnn::PermutationVector& mapping,
569 const armnn::TensorInfo& outputInfo)
570{
571 if (!mapping.IsEqual(IdentityPermutation4D))
572 {
573 size_t nInputs = inputs.size();
574 for (size_t i=0; i<nInputs; ++i)
575 {
576 // check permute layer
577 armnn::PermuteDescriptor permuteDesc;
578 permuteDesc.m_DimMappings = mapping;
579
580 bool isSupported = false;
581 FORWARD_LAYER_SUPPORT_FUNC(__func__,
582 IsPermuteSupported,
583 data.m_Backends,
584 isSupported,
585 inputs[i].GetTensorInfo(),
586 outputInfo,
587 permuteDesc);
588 if (!isSupported)
589 {
590 return false;
591 }
592
593 }
594 SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
595 }
596 return true;
597}
598
599
narpra01f176d5a2018-11-18 20:17:48 +0000600bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
601 int32_t & concatDimension,
602 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100603{
narpra01f176d5a2018-11-18 20:17:48 +0000604 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100605 BOOST_ASSERT(numberOfDimensions >= 3);
606
607 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000608 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
609 // or along dimension 0 or 2 for a 3-D tensor.
610 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100611 {
narpra01f176d5a2018-11-18 20:17:48 +0000612 concatDimension = 1;
613 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
614 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100615 }
narpra01f176d5a2018-11-18 20:17:48 +0000616 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100617 {
narpra01f176d5a2018-11-18 20:17:48 +0000618 concatDimension = 0;
619 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
620 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100621 }
narpra01f176d5a2018-11-18 20:17:48 +0000622 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100623}
624
625} // anonymous namespace
626
627namespace armnn_driver
628{
629
630//// Creates an ArmNN activation layer and connects it to the given layer, if the
631//// passed in AndroidNN activation function requires so.
632//// @return The end layer of the sequence of layers built for the given AndroidNN
633//// activation function or nullptr if an error occurred (e.g. unsupported activation).
634//// Note that the end layer matches the input layer if no activation is required
635//// (the sequence of layers has length 1).
636armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
637 ActivationFn activation,
638 armnn::IConnectableLayer* prevLayer,
639 ConversionData& data);
640
641} // namespace armnn_driver
642
643///
644/// Utility templates
645///
646
647namespace armnn_driver
648{
649
650using namespace android::nn;
651
/// Looks up the operand feeding input `inputIndex` of `operation`.
/// @param failOnIndexOutOfBounds When true, logs an error for an out-of-range index
///        (a nullptr is returned either way).
/// @return Pointer to the operand, or nullptr if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match the uint32_t/size_t argument types; the previous "%i"
            // specifiers were undefined behaviour for these unsigned arguments
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
673
/// Looks up the operand produced as output `outputIndex` of `operation`.
/// @return Pointer to the operand, or nullptr (with a logged error) if the index
///         is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match the uint32_t/size_t argument types; the previous "%i"
        // specifiers were undefined behaviour for these unsigned arguments
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
693
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100694template<typename HalPolicy,
Pablo Tellofb45e2f2019-10-18 16:51:57 +0100695 typename HalOperand = typename HalPolicy::Operand,
696 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100697const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100698 const HalModel& model,
699 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000700 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100701{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100702 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100703
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100704 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100705 switch (operand.lifetime)
706 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100707 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100708 {
709 // Constant found in model.operandValues
710 valueStart = &model.operandValues[operand.location.offset];
711 break;
712 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100713 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100714 {
715 // Constant specified via a Memory object
716 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
717 break;
718 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100719 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000720 {
721 // An optional input tensor with no values is not an error so should not register as a fail
722 if (optional)
723 {
724 valueStart = nullptr;
725 break;
726 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100727 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000728 }
arovir01b0717b52018-09-05 17:03:25 +0100729 default:
730 {
731 // Unsupported/invalid (e.g. can't get value of an input to the model)
732 Fail("%s: unsupported/invalid operand lifetime: %s",
733 __func__, toString(operand.lifetime).c_str());
734 valueStart = nullptr;
735 }
736 }
737
738 return valueStart;
739}
740
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100741template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100742 typename HalOperation = typename HalPolicy::Operation,
743 typename HalModel = typename HalPolicy::Model,
744 typename HalOperandType = typename HalPolicy::OperandType>
745bool GetOperandType(const HalOperation& operation,
746 uint32_t inputIndex,
747 const HalModel& model,
748 HalOperandType& type)
749{
750 using HalOperand = typename HalPolicy::Operand;
751
752 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
753 if (!operand)
754 {
755 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
756 }
757
758 type = operand->type;
759 return true;
760}
761
/// Returns true when the operand's data is fixed at model-build time.
/// NOTE: NO_VALUE is deliberately treated as "constant" here, so absent optional
/// operands take the constant-tensor path.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
774
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
// Wraps a constant HAL operand as a ConstTensorPin pointing at its read-only data.
//
// dimensionMappings   - permutation recorded in the pin and applied to the
//                       per-axis quantization dimension (see note below).
// overrideTensorShape - if non-null, replaces the shape derived from the operand.
// optional            - when true, a missing value is not an error: the result
//                       is an invalid pin flagged as optional.
//
// Returns an invalid ConstTensorPin (after logging via Fail) when the operand
// type is not a supported tensor type, the operand is not constant (unless
// optional), or its data cannot be mapped.
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Non-optional operands must be constants; optional ones may be NO_VALUE.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    // NOTE(review): this indexes dimensionMappings with the quantization dim even
    // when the default g_DontPermute is passed — assumes PermutationVector handles
    // that case (or callers with per-axis quantization always pass a mapping); confirm.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
823
824template<typename HalPolicy,
825 typename HalOperation = typename HalPolicy::Operation,
826 typename HalModel = typename HalPolicy::Model>
827ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
828 uint32_t inputIndex,
829 const HalModel& model,
830 const ConversionData& data,
831 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
832 const armnn::TensorShape* overrideTensorShape = nullptr,
833 bool optional = false)
834{
835 using HalOperand = typename HalPolicy::Operand;
836
837 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
838 if (!operand)
839 {
840 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
841 return ConstTensorPin();
842 }
843 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
844 model,
845 data,
846 dimensionMappings,
847 overrideTensorShape,
848 optional);
849}
850
851template<typename HalPolicy,
852 typename OutputType,
853 typename HalOperandType = typename HalPolicy::OperandType,
854 typename HalOperation = typename HalPolicy::Operation,
855 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100856bool GetInputScalar(const HalOperation& operation,
857 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100858 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100859 OutputType& outValue,
860 const HalModel& model,
861 const ConversionData& data)
862{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100863 using HalOperand = typename HalPolicy::Operand;
864
865 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100866 if (!operand)
867 {
868 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
869 }
870
871 if (operand->type != type)
872 {
873 return Fail("%s: unexpected operand type: %s (should be %s)",
874 __func__, toString(operand->type).c_str(), toString(type).c_str());
875 }
876
877 if (operand->location.length != sizeof(OutputType))
878 {
879 return Fail("%s: incorrect operand location length: %i (should be %i)",
880 __func__, operand->location.length, sizeof(OutputType));
881 }
882
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100883 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100884 if (!valueAddress)
885 {
886 return Fail("%s: failed to get address for operand", __func__);
887 }
888
889 outValue = *(static_cast<const OutputType*>(valueAddress));
890 return true;
891}
892
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100893template<typename HalPolicy,
894 typename HalOperation = typename HalPolicy::Operation,
895 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100896bool GetInputInt32(const HalOperation& operation,
897 uint32_t inputIndex,
898 int32_t& outValue,
899 const HalModel& model,
900 const ConversionData& data)
901{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100902 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100903}
904
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100905template<typename HalPolicy,
906 typename HalOperation = typename HalPolicy::Operation,
907 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100908bool GetInputFloat32(const HalOperation& operation,
909 uint32_t inputIndex,
910 float& outValue,
911 const HalModel& model,
912 const ConversionData& data)
913{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100914 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100915}
916
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100917template<typename HalPolicy,
918 typename HalOperation = typename HalPolicy::Operation,
919 typename HalOperandType = typename HalPolicy::OperandType,
920 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100921bool GetInputActivationFunctionImpl(const HalOperation& operation,
922 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100923 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100924 ActivationFn& outActivationFunction,
925 const HalModel& model,
926 const ConversionData& data)
927{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100928 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100929 {
930 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
931 __func__,
932 toString(type).c_str(),
933 toString(OperandType::INT32).c_str(),
934 toString(OperandType::TENSOR_INT32).c_str());
935 }
936
937 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100938 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100939 {
940 return Fail("%s: failed to get activation input value", __func__);
941 }
942 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
943 return true;
944}
945
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100946template<typename HalPolicy,
947 typename HalOperation = typename HalPolicy::Operation,
948 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100949bool GetInputActivationFunction(const HalOperation& operation,
950 uint32_t inputIndex,
951 ActivationFn& outActivationFunction,
952 const HalModel& model,
953 const ConversionData& data)
954{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100955 return GetInputActivationFunctionImpl<HalPolicy>(operation,
956 inputIndex,
957 HalPolicy::OperandType::INT32,
958 outActivationFunction,
959 model,
960 data);
arovir01b0717b52018-09-05 17:03:25 +0100961}
962
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Variant of GetInputActivationFunction for operations whose activation
// operand is expressed as a tensor rather than a plain scalar.
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): the call below passes OperandType::INT32, not TENSOR_INT32,
    // so what is actually accepted here is a scalar INT32 operand — confirm
    // whether the comment or the type argument reflects the intent.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
980
981
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100982template<typename HalPolicy,
983 typename HalOperation = typename HalPolicy::Operation,
984 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100985bool GetOptionalInputActivation(const HalOperation& operation,
986 uint32_t inputIndex,
987 ActivationFn& activationFunction,
988 const HalModel& model,
989 const ConversionData& data)
990{
991 if (operation.inputs.size() <= inputIndex)
992 {
993 activationFunction = ActivationFn::kActivationNone;
994 }
995 else
996 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100997 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100998 {
999 return Fail("%s: Operation has invalid inputs", __func__);
1000 }
1001 }
1002 return true;
1003}
1004
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001005template<typename HalPolicy,
1006 typename ConvolutionDescriptor,
1007 typename HalOperation = typename HalPolicy::Operation,
1008 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001009bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1010 uint32_t dilationXIndex,
1011 ConvolutionDescriptor& descriptor,
1012 const HalModel& model,
1013 const ConversionData& data)
1014{
1015 bool success = true;
1016 if (operation.inputs.size() >= dilationXIndex + 2)
1017 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001018 success &= GetInputScalar<HalPolicy>(operation,
1019 dilationXIndex,
1020 HalPolicy::OperandType::INT32,
1021 descriptor.m_DilationX,
1022 model,
1023 data);
1024 success &= GetInputScalar<HalPolicy>(operation,
1025 dilationXIndex + 1,
1026 HalPolicy::OperandType::INT32,
1027 descriptor.m_DilationY,
1028 model,
1029 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001030 }
1031
1032 return success;
1033}
1034
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001035template<typename HalPolicy,
1036 typename HalOperand = typename HalPolicy::Operand,
1037 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001038bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001039 std::vector<int32_t>& outValues,
1040 const HalModel& model,
1041 const ConversionData& data)
1042{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001043 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001044 {
1045 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1046 }
1047
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001048 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001049 if (!startAddress)
1050 {
1051 return Fail("%s: failed to get operand address", __func__, operand.type);
1052 }
1053
1054 // Check number of bytes is sensible
1055 const uint32_t numBytes = operand.location.length;
1056 if (numBytes % sizeof(int32_t) != 0)
1057 {
1058 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1059 __func__, numBytes, sizeof(int32_t));
1060 }
1061
1062 outValues.resize(numBytes / sizeof(int32_t));
1063 memcpy(outValues.data(), startAddress, numBytes);
1064 return true;
1065}
1066
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001067template<typename HalPolicy,
1068 typename HalOperation = typename HalPolicy::Operation,
1069 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001070bool GetInputPaddingScheme(const HalOperation& operation,
1071 uint32_t inputIndex,
1072 PaddingScheme& outPaddingScheme,
1073 const HalModel& model,
1074 const ConversionData& data)
1075{
1076 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001077 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001078 {
1079 return Fail("%s: failed to get padding scheme input value", __func__);
1080 }
1081
1082 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1083 return true;
1084}
1085
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves input operand <inputIndex> of a HAL operation into a LayerInputHandle
// that a converter can connect to an ArmNN layer:
//  - model inputs / temporaries / model outputs are mapped to the output slot
//    already recorded for that operand in data.m_OutputSlotForOperand;
//  - constants are materialised as an ArmNN Constant layer (backend permitting);
//  - anything else (unsupported type, dynamic shape, unknown lifetime) yields an
//    invalid handle after logging via Fail.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1198
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001199template<typename HalPolicy,
1200 typename HalOperation = typename HalPolicy::Operation,
1201 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001202bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1203 uint32_t operationOutputIndex,
1204 armnn::IConnectableLayer& layer,
1205 uint32_t layerOutputIndex,
1206 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001207 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001208{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001209 using HalOperand = typename HalPolicy::Operand;
1210
1211 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001212 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1213 {
1214 return false;
1215 }
1216
1217 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1218
1219 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1220 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1221
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001222 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001223
1224 return true;
1225}
1226
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001227template<typename HalPolicy,
1228 typename HalOperation = typename HalPolicy::Operation,
1229 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001230armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1231 uint32_t inputIndex,
1232 const HalModel& model,
1233 ConversionData& data)
1234{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001235 using HalOperand = typename HalPolicy::Operand;
1236
1237 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001238 if (!operand)
1239 {
1240 return armnn::DataLayout::NHWC;
1241 }
1242
1243 if (!IsBool(*operand))
1244 {
1245 return armnn::DataLayout::NHWC;
1246 }
1247
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001248 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001249 if (!valueAddress)
1250 {
1251 return armnn::DataLayout::NHWC;
1252 }
1253
1254 if (*(static_cast<const bool*>(valueAddress)))
1255 {
1256 return armnn::DataLayout::NCHW;
1257 }
1258 else
1259 {
1260 return armnn::DataLayout::NHWC;
1261 }
1262}
1263
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001264template<typename HalPolicy,
1265 typename HalOperation = typename HalPolicy::Operation,
1266 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001267bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1268 uint32_t outputIndex,
1269 armnn::IConnectableLayer& layer,
1270 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001271 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001272{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001273 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1274 outputIndex,
1275 layer,
1276 outputIndex,
1277 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001278 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001279}
1280
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001281template<typename HalPolicy,
1282 typename HalOperation = typename HalPolicy::Operation,
1283 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001284bool ConvertToActivation(const HalOperation& operation,
1285 const char* operationName,
1286 const armnn::ActivationDescriptor& activationDesc,
1287 const HalModel& model,
1288 ConversionData& data)
1289{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001290 using HalOperand = typename HalPolicy::Operand;
1291
1292 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001293 if (!input.IsValid())
1294 {
1295 return Fail("%s: Input 0 is invalid", operationName);
1296 }
1297
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001298 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001299 if (!outputOperand)
1300 {
1301 return false;
1302 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001303
1304 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001305 if (IsDynamicTensor(outInfo))
1306 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001307 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001308 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001309
1310 bool isSupported = false;
1311 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1312 IsActivationSupported,
1313 data.m_Backends,
1314 isSupported,
1315 input.GetTensorInfo(),
1316 outInfo,
1317 activationDesc);
1318 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001319 {
1320 return false;
1321 }
1322
1323 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1324 BOOST_ASSERT(layer != nullptr);
1325 input.Connect(layer->GetInputSlot(0));
1326
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001327 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001328}
1329
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001330template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001331 typename HalOperation = typename HalPolicy::Operation,
1332 typename HalModel = typename HalPolicy::Model>
1333bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1334{
1335 armnn::ActivationDescriptor desc;
1336 desc.m_Function = armnn::ActivationFunction::ReLu;
1337
1338 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1339}
1340
1341template<typename HalPolicy,
1342 typename HalOperation = typename HalPolicy::Operation,
1343 typename HalModel = typename HalPolicy::Model>
1344bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1345{
1346 armnn::ActivationDescriptor desc;
1347 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1348 desc.m_A = 1.0f;
1349 desc.m_B = -1.0f;
1350
1351 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1352}
1353
1354template<typename HalPolicy,
1355 typename HalOperation = typename HalPolicy::Operation,
1356 typename HalModel = typename HalPolicy::Model>
1357bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1358{
1359 armnn::ActivationDescriptor desc;
1360 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1361 desc.m_A = 6.0f;
1362
1363 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1364}
1365
1366template<typename HalPolicy,
1367 typename HalOperation = typename HalPolicy::Operation,
1368 typename HalModel = typename HalPolicy::Model>
1369bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1370{
1371 armnn::ActivationDescriptor desc;
1372 desc.m_Function = armnn::ActivationFunction::TanH;
1373 desc.m_A = 1.0f; // android nn does not support tanH parameters
1374 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1375
1376 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1377}
1378
1379template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001380 typename HalOperation = typename HalPolicy::Operation,
1381 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001382bool ConvertPaddings(const HalOperation& operation,
1383 const HalModel& model,
1384 ConversionData& data,
1385 unsigned int rank,
1386 armnn::PadDescriptor& padDescriptor)
1387{
1388 using HalOperand = typename HalPolicy::Operand;
1389
1390 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1391 if (!paddingsOperand)
1392 {
1393 return Fail("%s: Could not read paddings operand", __func__);
1394 }
1395
1396 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1397 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1398 {
1399 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1400 }
1401
1402 std::vector<int32_t> paddings;
1403 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1404
1405 // add padding for each dimension of input tensor.
1406 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1407 {
1408 int paddingBeforeInput = paddings[i];
1409 int paddingAfterInput = paddings[i + 1];
1410
1411 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1412 {
1413 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1414 }
1415
1416 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1417 }
1418
1419 return true;
1420}
1421
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL 2-D pooling operation (max/average/L2, chosen by poolType)
// into an ArmNN Pooling2d layer followed by the fused activation.
// Two HAL parameter layouts are accepted:
//  - explicit padding: 10+ inputs (pad l/r/t/b, strides, pool size, activation,
//    optional data layout at index 10);
//  - implicit padding: 7+ inputs (padding scheme, strides, pool size,
//    activation, optional data layout at index 7) — padding is computed from
//    the input dimensions via CalcPadding.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Android NN tensors default to NHWC; an optional trailing operand may
    // switch the layout to NCHW (HAL 1.2+ operands only).
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // The data layout must be resolved before CalcPadding, since the width
        // and height indices into the input shape depend on it.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // ProcessActivation appends the fused activation layer (if any) and returns
    // the last layer of the chain, whose output is the operation's output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1540
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001541template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001542 typename HalOperation = typename HalPolicy::Operation,
1543 typename HalModel = typename HalPolicy::Model>
1544bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001545{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001546 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001547
1548 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1549 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1550
1551 if (!input0.IsValid() || !input1.IsValid())
1552 {
1553 return Fail("%s: Operation has invalid inputs", __func__);
1554 }
1555
1556 // The FuseActivation parameter is always the input index 2
1557 // and it should be optional
1558 ActivationFn activationFunction;
1559 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1560 {
1561 return Fail("%s: Operation has invalid inputs", __func__);
1562 }
1563
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001564 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001565 if (!outputOperand)
1566 {
1567 return false;
1568 }
1569
1570 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1571 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1572
1573 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1574 if (IsDynamicTensor(outputInfo))
1575 {
1576 return Fail("%s: Dynamic output tensors are not supported", __func__);
1577 }
1578
1579 bool isSupported = false;
1580 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1581 IsAdditionSupported,
1582 data.m_Backends,
1583 isSupported,
1584 inputInfo0,
1585 inputInfo1,
1586 outputInfo);
1587 if (!isSupported)
1588 {
1589 return false;
1590 }
1591
1592 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1593 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1594
1595 if (endLayer != nullptr)
1596 {
Kevin Mayaed08ac2019-12-12 16:33:31 +00001597 bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001598 if (!isReshapeSupported)
1599 {
1600 return false;
1601 }
1602
Mike Kelly46272802019-08-14 17:00:48 +01001603 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1604 }
1605 else
1606 {
1607 return Fail("%s: ProcessActivation failed", __func__);
1608 }
1609}
1610
1611template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001612 typename HalOperation = typename HalPolicy::Operation,
1613 typename HalModel = typename HalPolicy::Model>
1614bool ConvertArgMinMax(const HalOperation& operation,
1615 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001616 ConversionData& data,
1617 armnn::ArgMinMaxFunction argMinMaxFunction)
1618{
1619 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1620
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001621 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001622 using HalOperandType = typename HalPolicy::OperandType;
1623
1624 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1625
1626 if (!input0.IsValid())
1627 {
1628 return Fail("%s: Operation has invalid inputs", __func__);
1629 }
1630
1631 int32_t axis;
1632 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1633 {
1634 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1635 }
1636
1637 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1638 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1639
1640 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1641 {
1642 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1643 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1644 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1645 return Fail("%s: Axis must be in range [-n, n)", __func__);
1646 }
1647
1648 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1649 if (!output)
1650 {
1651 return Fail("%s: Could not read output 0", __func__);
1652 }
1653
1654 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1655
1656 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1657 if (IsDynamicTensor(outputInfo))
1658 {
1659 return Fail("%s: Dynamic output tensors are not supported", __func__);
1660 }
1661
1662 armnn::ArgMinMaxDescriptor descriptor;
1663 descriptor.m_Function = argMinMaxFunction;
1664 descriptor.m_Axis = axis;
1665
1666 bool isSupported = false;
1667 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1668 IsArgMinMaxSupported,
1669 data.m_Backends,
1670 isSupported,
1671 inputInfo0,
1672 outputInfo,
1673 descriptor);
1674 if (!isSupported)
1675 {
1676 return false;
1677 }
1678
1679 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1680 assert(layer != nullptr);
1681
1682 input0.Connect(layer->GetInputSlot(0));
1683
1684 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1685}
1686
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts a CONCATENATION HAL operation into an ArmNN Concat layer.
/// HAL layout: the first N (0..N-1) inputs are tensors; input N is the INT32
/// concatenation axis. Inputs of rank < 3 are first expanded via Reshape
/// layers; a Permute pair may be inserted around the concat when the axis is
/// one ArmNN's concat implementation does not handle directly; any added
/// dimensions are undone by a trailing Reshape.
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis scalar lives at input index numInputTensors (the last input).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // outputInfo/outputShape are taken by value because they are rewritten
    // below when reshapes/permutes are inserted.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track how many leading 1-dimensions were added so the concat axis and
    // output shape can be adjusted, and the expansion undone afterwards.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                // 1-D input: prepend two singleton dimensions.
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // Verify backend support for the expansion reshape before adding it.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect non-owning TensorInfo pointers for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Deswizzle with the inverse permutation; verify support first.
        armnn::PermuteDescriptor permuteDesc;
        permuteDesc.m_DimMappings = permutationPair.second;

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPermuteSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   outputInfo,
                                   permuteDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // Verify backend support for the shrinking reshape before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
            *data.m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1974
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts a CONV_2D HAL operation into an ArmNN Convolution2d layer.
/// Supports the two HAL signatures: 10 inputs (explicit padding: operands
/// 3-6 are pad l/r/t/b, 7-8 strides, 9 activation) and 7 inputs (implicit
/// padding: operand 3 is the padding scheme, 4-5 strides, 6 activation).
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Operand 1 is the weights tensor, operand 2 the bias; both must be constant.
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Rescale the bias quantization so it matches input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding signature.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding signature: derive the pad values from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NHWC indexing: dim 1 = height, dim 2 = width (weights assumed
        // [ depth_out, H, W, depth_in ] per the NN API CONV_2D spec - TODO confirm).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer pass-through if kActivationNone).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2094
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002095template<typename HalPolicy,
2096 typename HalOperation = typename HalPolicy::Operation,
2097 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002098bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2099{
2100 using HalOperand = typename HalPolicy::Operand;
2101 using HalOperandType = typename HalPolicy::OperandType;
2102
2103 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2104 if (!input.IsValid() )
2105 {
2106 return Fail("%s: Operation has invalid inputs", __func__);
2107 }
2108
2109 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2110 unsigned int rank = inputInfo.GetNumDimensions();
2111 if (rank != 4)
2112 {
2113 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2114 }
2115
2116 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2117 if (!output)
2118 {
2119 return Fail("%s: Could not read output 0", __func__);
2120 }
2121
2122 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2123 if (IsDynamicTensor(outputInfo))
2124 {
2125 return Fail("%s: Dynamic output tensors are not supported", __func__);
2126 }
2127
2128 armnn::DepthToSpaceDescriptor descriptor;
2129
2130 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2131 if (descriptor.m_BlockSize <= 1)
2132 {
2133 return Fail("%s: Block size must be at least 1 in all dimensions");
2134 }
2135
2136 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2137 if (Is12Operand(*output))
2138 {
2139 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2140 }
2141
2142 bool isSupported = false;
2143 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2144 IsDepthToSpaceSupported,
2145 data.m_Backends,
2146 isSupported,
2147 inputInfo,
2148 outputInfo,
2149 descriptor);
2150 if (!isSupported)
2151 {
2152 return false;
2153 }
2154
2155 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2156 assert(layer != nullptr);
2157 input.Connect(layer->GetInputSlot(0));
2158
2159 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2160}
2161
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
/// Converts a DEPTHWISE_CONV_2D HAL operation into an ArmNN
/// DepthwiseConvolution2d layer. Supports the two HAL signatures: 11 inputs
/// (explicit padding: operands 3-6 pad l/r/t/b, 7-8 strides, 10 activation)
/// and 8 inputs (implicit padding: operand 3 padding scheme, 4-5 strides,
/// 7 activation). Note the depth multiplier operand (9 resp. 6) is not read
/// here; it is implied by the reconstructed weights shape.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I is taken from the input's channel dimension (NHWC dim 3); M is the
    // depth multiplier recovered as (I * M) / I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    // The pin both reshapes and permutes the constant weight data.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Rescale the bias quantization so it matches input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding signature.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding signature: derive the pad values from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel size comes from the swizzled [ M, I, H, W ] weights
        // (dim 2 = H, dim 3 = W); input is NHWC (dim 1 = H, dim 2 = W).
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (no-op layer pass-through if kActivationNone).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2306
Mike Kelly3c673942019-07-25 09:26:06 +01002307template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002308 typename HalOperation = typename HalPolicy::Operation,
2309 typename HalModel = typename HalPolicy::Model>
2310bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002311{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002312 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002313
2314 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2315 if (!input.IsValid())
2316 {
2317 return Fail("%s: Operation has invalid input", __func__);
2318 }
2319
Sadik Armagan98c0f662019-11-21 15:54:36 +00002320 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2321 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2322 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2323 {
2324 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2325 }
2326
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002327 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002328 if (!outputOperand)
2329 {
2330 return Fail("%s: Operation has invalid outputs", __func__);
2331 }
2332
2333 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2334 if (IsDynamicTensor(outputInfo))
2335 {
2336 return Fail("%s: Dynamic output tensors are not supported", __func__);
2337 }
2338
2339 bool isSupported = false;
2340 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2341 IsDequantizeSupported,
2342 data.m_Backends,
2343 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002344 inputInfo,
2345 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002346 if (!isSupported)
2347 {
2348 return false;
2349 }
2350
2351 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2352 assert(layer != nullptr);
2353 input.Connect(layer->GetInputSlot(0));
2354
2355 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2356}
2357
2358template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002359 typename HalOperation = typename HalPolicy::Operation,
2360 typename HalModel = typename HalPolicy::Model>
2361bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002362{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002363 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002364
2365 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2366 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2367
2368 if (!input0.IsValid() || !input1.IsValid())
2369 {
2370 return Fail("%s: Operation has invalid inputs", __func__);
2371 }
2372
2373 // The FuseActivation parameter is always the input index 2
2374 // and it should be optional
2375 ActivationFn activationFunction;
2376 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2377 {
2378 return Fail("%s: Operation has invalid inputs", __func__);
2379 }
2380
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002381 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002382 if (!output)
2383 {
2384 return Fail("%s: Could not read output 0", __func__);
2385 }
2386
2387 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2388 if (IsDynamicTensor(outputInfo))
2389 {
2390 return Fail("%s: Dynamic output tensors are not supported", __func__);
2391 }
2392
2393 bool isSupported = false;
2394 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2395 IsDivisionSupported,
2396 data.m_Backends,
2397 isSupported,
2398 input0.GetTensorInfo(),
2399 input1.GetTensorInfo(),
2400 outputInfo);
2401 if (!isSupported)
2402 {
2403 return false;
2404 }
2405
2406 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2407 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2408
2409 if (endLayer)
2410 {
Kevin Mayaed08ac2019-12-12 16:33:31 +00002411 bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002412 if (!isReshapeSupported)
2413 {
2414 return false;
2415 }
2416
Mike Kelly46272802019-08-14 17:00:48 +01002417 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2418 }
2419 return Fail("%s: ProcessActivation failed", __func__);
2420}
2421
2422template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002423 typename HalOperation = typename HalPolicy::Operation,
2424 typename HalModel = typename HalPolicy::Model>
2425bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002426{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002427 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002428
2429 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2430 if (!input.IsValid())
2431 {
2432 return Fail("%s: Operation has invalid inputs", __func__);
2433 }
2434
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002435 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002436 if (!outputOperand)
2437 {
2438 return Fail("%s: Operation has invalid outputs", __func__);
2439 }
2440
2441 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2442 if (IsDynamicTensor(outputInfo))
2443 {
2444 return Fail("%s: Dynamic output tensors are not supported", __func__);
2445 }
2446
2447 bool isSupported = false;
2448 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2449 IsFloorSupported,
2450 data.m_Backends,
2451 isSupported,
2452 input.GetTensorInfo(),
2453 outputInfo);
2454 if (!isSupported)
2455 {
2456 return false;
2457 }
2458
2459 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2460 assert(layer != nullptr);
2461 input.Connect(layer->GetInputSlot(0));
2462
2463 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2464}
2465
// Returns true if the operand holds symmetric per-tensor quantized 8-bit data.
// The V1_0 HAL defines no such operand type, so this overload is always false.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 introduced TENSOR_QUANT8_SYMM; detect it on V1_2 operands.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2479
// Outcome of DequantizeIfRequired():
//  SUCCESS         - weights were produced by a DEQUANTIZE op and were dequantized here
//  NOT_REQUIRED    - weights are already constant, or no applicable DEQUANTIZE producer was found
//  INVALID_OPERAND - the weights operand could not be read from the model
enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

// (dequantized float buffer, buffer size in bytes, tensor info describing the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2488
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002489template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002490 typename HalOperation = typename HalPolicy::Operation,
2491 typename HalModel = typename HalPolicy::Model>
2492DequantizeResult DequantizeIfRequired(size_t operand_index,
2493 const HalOperation& operation,
2494 const HalModel& model,
2495 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002496{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002497 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002498
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002499 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002500 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002501 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002502 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002503 }
2504
2505 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2506 {
2507 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002508 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002509 }
2510
2511 const size_t weightsInputIndex = operation.inputs[operand_index];
2512
2513 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2514 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2515 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2516 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002517 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002518 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002519 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2520 {
2521 continue;
2522 }
2523
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002524 size_t outOpIndex = weightsInputIndex + 1;
2525 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002526 {
2527 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002528 }
2529
2530 if (outOpIndex != weightsInputIndex)
2531 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002532 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002533 }
2534
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002535 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002536 BOOST_ASSERT(operand);
2537
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002538 if (!IsQSymm8(*operand))
2539 {
2540 // Only supporting dequantize from QSYMM8 to FLOAT
2541 break;
2542 }
2543
2544 // Allocate a new buffer for the dequantized data and manually dequantize
2545 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2546 if (!startValue)
2547 {
2548 // Failed to get the operand address
2549 break;
2550 }
2551
2552 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2553 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002554 const float quantizationScale = operand->scale;
2555
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002556 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2557 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2558 {
2559 float* dstPtr = dequantizedBuffer.get();
2560 BOOST_ASSERT(dstPtr);
2561 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2562 }
2563
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002564 // Construct tensor info for dequantized ConstTensor
2565 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2566 operand->dimensions.data(),
2567 armnn::DataType::Float32);
2568
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002569 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2570 std::move(tensorInfo),
2571 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002572 }
2573
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002574 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002575}
2576
2577template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002578 typename HalOperation = typename HalPolicy::Operation,
2579 typename HalModel = typename HalPolicy::Model>
2580ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2581 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002582 const ConversionData& data,
2583 size_t operandIndex,
2584 bool optional = false)
2585{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002586 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2587
2588 DequantizeStatus status = std::get<3>(dequantized);
2589 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002590 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002591 case DequantizeStatus::INVALID_OPERAND:
2592 {
2593 // return invalid const tensor pin
2594 return ConstTensorPin();
2595 }
2596 case DequantizeStatus::NOT_REQUIRED:
2597 {
2598 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2599 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2600 }
2601 case DequantizeStatus::SUCCESS:
2602 default:
2603 {
2604 return ConstTensorPin(
2605 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2606 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002607 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002608}
2609
2610
// Converts an NNAPI FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = data tensor, 1 = weights, 2 = bias (1D), 3 = fused activation function.
// Inputs with more than 2 dimensions are flattened via an inserted Reshape layer.
// Returns false (via Fail) if any operand is invalid or no backend supports the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be produced by a DEQUANTIZE op; dequantize them to constants if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2D shape the input must be flattened to; shape mismatches
    // surface as exceptions and are reported as a conversion failure.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Check that flattened input x weights really produces the declared output shape.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Flatten the >2D input through a Reshape layer feeding the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2729
2730template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002731 typename HalOperation = typename HalPolicy::Operation,
2732 typename HalModel = typename HalPolicy::Model>
2733bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002734{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002735 using HalOperand = typename HalPolicy::Operand;
2736
Mike Kelly999e2092019-08-15 10:46:46 +01002737 if (operation.inputs.size() != 1)
2738 {
2739 return Fail("%s: Optional inputs are not supported", __func__);
2740 }
2741
Mike Kelly46272802019-08-14 17:00:48 +01002742 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2743 if (!input.IsValid())
2744 {
2745 return Fail("%s: Operation has invalid inputs", __func__);
2746 }
2747
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002748 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002749 if (!output)
2750 {
2751 return Fail("%s: Could not read output 0", __func__);
2752 }
2753
2754 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2755 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2756
2757 if (IsDynamicTensor(outputInfo))
2758 {
2759 return Fail("%s: Dynamic output tensors are not supported", __func__);
2760 }
2761 if (outputInfo.GetNumDimensions() != 4u)
2762 {
2763 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2764 }
2765
2766 armnn::L2NormalizationDescriptor desc;
2767 desc.m_DataLayout = armnn::DataLayout::NHWC;
2768
2769 bool isSupported = false;
2770 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2771 IsL2NormalizationSupported,
2772 data.m_Backends,
2773 isSupported,
2774 inputInfo,
2775 outputInfo,
2776 desc);
2777 if (!isSupported)
2778 {
2779 return false;
2780 }
2781
2782 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2783 assert(layer != nullptr);
2784 input.Connect(layer->GetInputSlot(0));
2785
2786 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2787}
2788
// Converts an NNAPI LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel, local-brightness method, NHWC layout).
// Inputs: 0 = data tensor, 1 = radius (INT32), 2 = bias k, 3 = alpha, 4 = beta.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // Only the 5-input form is handled; any optional extra inputs are rejected.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read the scalar parameters (radius, k, alpha, beta) into the descriptor.
    // NOTE(review): input.IsValid() was already checked above; the re-check here
    // is redundant but harmless.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2866
2867template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002868 typename HalOperation = typename HalPolicy::Operation,
2869 typename HalModel = typename HalPolicy::Model>
2870bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002871{
Mike Kelly46272802019-08-14 17:00:48 +01002872 armnn::ActivationDescriptor desc;
2873 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2874
2875 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2876}
2877
// Converts an NNAPI MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = data tensor, 1 = axis tensor (INT32 values), 2 = keep_dims scalar.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // Negative axes are wrapped via (i + rank) % rank; values outside
    // [-rank, rank) are presumably rejected by upstream validation - TODO confirm.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2954
2955template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002956 typename HalOperation = typename HalPolicy::Operation,
2957 typename HalModel = typename HalPolicy::Model>
2958bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002959{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002960 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002961
2962 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2963 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2964
2965 if (!input0.IsValid() || !input1.IsValid())
2966 {
2967 return Fail("%s: Operation has invalid inputs", __func__);
2968 }
2969
2970 // The FuseActivation parameter is always the input index 2
2971 // and it should be optional
2972 ActivationFn activationFunction;
2973 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2974 {
2975 return Fail("%s: Operation has invalid inputs", __func__);
2976 }
2977
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002978 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002979
2980 if (outputOperand == nullptr)
2981 {
2982 return false;
2983 }
2984
2985 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2986 if (IsDynamicTensor(outputInfo))
2987 {
2988 return Fail("%s: Dynamic output tensors are not supported", __func__);
2989 }
2990
2991 bool isSupported = false;
2992 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2993 IsMultiplicationSupported,
2994 data.m_Backends,
2995 isSupported,
2996 input0.GetTensorInfo(),
2997 input1.GetTensorInfo(),
2998 outputInfo);
2999 if (!isSupported)
3000 {
3001 return false;
3002 }
3003
3004 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3005 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3006
3007 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3008 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3009
3010 if (endLayer != nullptr)
3011 {
Kevin Mayaed08ac2019-12-12 16:33:31 +00003012 bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003013 if (!isReshapeSupported)
3014 {
3015 return false;
3016 }
3017
Mike Kelly46272802019-08-14 17:00:48 +01003018 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3019 }
3020 else
3021 {
3022 return Fail("%s: ProcessActivation failed", __func__);
3023 }
3024}
3025
3026template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003027 typename HalOperation = typename HalPolicy::Operation,
3028 typename HalModel = typename HalPolicy::Model>
3029bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003030{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003031 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003032
Mike Kelly3c673942019-07-25 09:26:06 +01003033 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3034 if (!input.IsValid())
3035 {
3036 return Fail("%s: Operation has invalid inputs", __func__);
3037 }
3038
3039 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3040 unsigned int rank = inputInfo.GetNumDimensions();
3041
3042 armnn::PadDescriptor descriptor;
3043 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3044 {
3045 return Fail("%s: Could not convert paddings", __func__);
3046 }
3047
3048 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3049 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3050 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Derek Lamberti1a38cda2020-01-10 17:28:20 +00003051 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
Mike Kelly3c673942019-07-25 09:26:06 +01003052 {
3053 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3054 }
3055
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003056 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003057 if (!output)
3058 {
3059 return Fail("%s: Could not read output", __func__);
3060 }
3061
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003062 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003063 if (IsDynamicTensor(outputInfo))
3064 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003065 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003066 }
3067
3068 bool isSupported = false;
3069 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3070 IsPadSupported,
3071 data.m_Backends,
3072 isSupported,
3073 inputInfo,
3074 outputInfo,
3075 descriptor);
3076 if (!isSupported)
3077 {
3078 return false;
3079 }
3080
3081 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3082 assert(layer != nullptr);
3083 input.Connect(layer->GetInputSlot(0));
3084 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3085
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003086 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003087}
3088
Mike Kelly0a879362019-07-29 16:56:31 +01003089template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003090 typename HalOperation = typename HalPolicy::Operation,
3091 typename HalModel = typename HalPolicy::Model>
3092bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003093{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003094 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003095
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003096 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3097 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3098 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003099
3100 if (inputOperand == nullptr
3101 || requestedShapeOperand == nullptr
3102 || outputOperand == nullptr)
3103 {
3104 return Fail("%s: Operation has invalid inputs", __func__);
3105 }
3106
3107 if (requestedShapeOperand->dimensions.size() != 1)
3108 {
3109 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3110 __func__, requestedShapeOperand->dimensions.size());
3111 }
3112
3113 std::vector<int32_t> targetDimensions;
3114 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3115 {
3116 return Fail("%s: Could not read values of input 1", __func__);
3117 }
3118
3119 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3120
3121 Shape requestedShape;
3122 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3123 // function that resolves these values into a fully specified tensor shape.
3124 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3125 {
3126 return Fail("%s: Failed to resolve the requested shape", __func__);
3127 }
3128
3129 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3130 if (!SameShape(requestedShape, outputOperandShape))
3131 {
3132 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3133 }
3134
3135 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3136 if (!input.IsValid())
3137 {
3138 return Fail("%s: Could not read input 0", __func__);
3139 }
3140
3141 armnn::ReshapeDescriptor reshapeDescriptor;
3142 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3143 requestedShape.dimensions.data());
3144
3145 bool isSupported = false;
3146 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3147 IsReshapeSupported,
3148 data.m_Backends,
3149 isSupported,
3150 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003151 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003152 reshapeDescriptor);
3153 if (!isSupported)
3154 {
3155 return false;
3156 }
3157
3158 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3159 assert(layer != nullptr);
3160 input.Connect(layer->GetInputSlot(0));
3161
3162 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3163}
3164
3165template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003166 typename HalOperation = typename HalPolicy::Operation,
3167 typename HalModel = typename HalPolicy::Model>
3168bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003169{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003170 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003171
Mike Kelly0a879362019-07-29 16:56:31 +01003172 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3173 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3174
3175 if (!input0.IsValid() || !input1.IsValid())
3176 {
3177 return Fail("%s: Operation has invalid inputs", __func__);
3178 }
3179
3180 // The FuseActivation parameter is always the input index 2
3181 // and it should be optional
3182 ActivationFn activationFunction;
3183 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3184 {
3185 return Fail("%s: Operation has invalid inputs", __func__);
3186 }
3187
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003188 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003189 if (!output)
3190 {
3191 return Fail("%s: Could not read output 0", __func__);
3192 }
3193
3194 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3195 if (IsDynamicTensor(outputInfo))
3196 {
3197 return Fail("%s: Dynamic output tensors are not supported", __func__);
3198 }
3199
3200 bool isSupported = false;
3201 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3202 IsSubtractionSupported,
3203 data.m_Backends,
3204 isSupported,
3205 input0.GetTensorInfo(),
3206 input1.GetTensorInfo(),
3207 outputInfo);
3208 if (!isSupported)
3209 {
3210 return false;
3211 }
3212
3213 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3214 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3215
3216 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3217 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3218
3219 if (endLayer)
3220 {
Kevin Mayaed08ac2019-12-12 16:33:31 +00003221 bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003222 if (!isReshapeSupported)
3223 {
3224 return false;
3225 }
Mike Kelly0a879362019-07-29 16:56:31 +01003226 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3227 }
3228
3229 return Fail("%s: ProcessActivation failed", __func__);
3230}
3231
Finn Williams23b87b32019-07-30 11:44:05 +01003232template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003233 typename HalOperation = typename HalPolicy::Operation,
3234 typename HalModel = typename HalPolicy::Model>
3235bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003236{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003237 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003238
3239 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3240 if (!input.IsValid())
3241 {
3242 return Fail("%s: Operation has invalid inputs", __func__);
3243 }
3244
3245 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3246 unsigned int rank = inputInfo.GetNumDimensions();
3247 if (rank > 4)
3248 {
3249 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3250 }
3251
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003252 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003253 if (!output)
3254 {
3255 return Fail("%s: Could not read output 0", __func__);
3256 }
3257
3258 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3259 {
3260 return Fail("%s: Dynamic output tensors are not supported", __func__);
3261 }
3262
3263 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3264 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003265 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003266
3267 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3268
3269 std::vector<int32_t> axis;
3270 if (!axisOperand)
3271 {
3272 axis.assign(dimensionSequence,
3273 dimensionSequence + rank);
3274 }
3275 else
3276 {
3277 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3278 }
3279
3280 std::vector<uint32_t> outputDims;
3281 for (unsigned int i = 0; i < rank; i++)
3282 {
3283 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3284 auto currentDimension = inputInfo.GetShape()[i];
3285 if (skipSqueeze || currentDimension != 1)
3286 {
3287 outputDims.push_back(currentDimension);
3288 }
3289 }
3290
3291 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3292
3293 armnn::TensorInfo outputInfo = inputInfo;
3294 outputInfo.SetShape(outShape);
3295
3296 armnn::ReshapeDescriptor reshapeDesc;
3297 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3298
3299 bool isSupported = false;
3300 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3301 IsReshapeSupported,
3302 data.m_Backends,
3303 isSupported,
3304 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003305 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003306 reshapeDesc);
3307 if (!isSupported)
3308 {
3309 return false;
3310 }
3311
3312 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3313 assert(layer != nullptr);
3314 input.Connect(layer->GetInputSlot(0));
3315
3316 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3317}
3318
3319template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003320 typename HalOperation = typename HalPolicy::Operation,
3321 typename HalModel = typename HalPolicy::Model>
3322bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003323{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003324 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003325
3326 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3327 if (!input.IsValid())
3328 {
3329 return Fail("%s: Operation has invalid inputs", __func__);
3330 }
3331
3332 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3333 unsigned int rank = inputInfo.GetNumDimensions();
3334 if (rank > 4)
3335 {
3336 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3337 }
3338
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003339 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003340 if (!output)
3341 {
3342 return Fail("%s: Could not read output 0", __func__);
3343 }
3344
3345 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3346 if (IsDynamicTensor(outputInfo))
3347 {
3348 return Fail("%s: Dynamic output tensors are not supported", __func__);
3349 }
3350
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003351 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3352 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3353 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003354
3355 std::vector<int32_t> beginValues;
3356 std::vector<int32_t> endValues;
3357 std::vector<int32_t> stridesValues;
3358
3359 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003360 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003361 {
3362 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3363 {
3364 return false;
3365 }
3366
3367 if (operandValues.size() != rank)
3368 {
3369 return false;
3370 }
3371
3372 return true;
3373 };
3374
3375 if (!ValidateInputOperands(*beginOperand, beginValues)
3376 || !ValidateInputOperands(*endOperand, endValues)
3377 || !ValidateInputOperands(*stridesOperand, stridesValues))
3378 {
3379 return Fail("%s: Operation has invalid input operand", __func__);
3380 }
3381
3382 // Stride cannot have value '0'
3383 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3384 {
3385 return Fail("%s: Stride must be non-zero value.", __func__);
3386 }
3387
3388 armnn::StridedSliceDescriptor descriptor;
3389 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3390 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3391 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3392 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3393
3394 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3395 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3396 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3397 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3398 {
3399 return Fail("%s: Operation has invalid inputs", __func__);
3400 }
3401
3402 bool isSupported = false;
3403 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3404 IsStridedSliceSupported,
3405 data.m_Backends,
3406 isSupported,
3407 inputInfo,
3408 outputInfo,
3409 descriptor);
3410 if (!isSupported)
3411 {
3412 return false;
3413 }
3414
3415 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3416 assert(layer != nullptr);
3417 input.Connect(layer->GetInputSlot(0));
3418
3419 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3420}
3421
3422template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003423 typename HalOperation = typename HalPolicy::Operation,
3424 typename HalModel = typename HalPolicy::Model>
3425bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003426{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003427 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003428
3429 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3430 if (!input.IsValid())
3431 {
3432 return Fail("%s: Operation has invalid inputs", __func__);
3433 }
3434
3435 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3436 unsigned int rank = inputInfo.GetNumDimensions();
3437 if (rank > 4)
3438 {
3439 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3440 }
3441
3442 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3443 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003444 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003445
3446 std::vector<int32_t> perm(rank);
3447 if (!permOperand)
3448 {
3449 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3450 for (unsigned int i = rank; i > 0; i--)
3451 {
3452 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3453 }
3454 }
3455 else
3456 {
3457 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3458 }
3459
3460 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3461
James Conroy1bde8e32020-01-22 16:40:57 +00003462 // Permutation vectors (outputDims) are given in ANN/Tf format, we must convert them to ArmNN format
3463 // For ANN/Tf/ACL: output[i] = input[ perm[i] ]
3464 // For ArmNN: output[ perm[i] ] = input[i]
3465 // e.g. 3,0,1,2 -> 1,2,3,0
3466 std::vector<unsigned int> armnnPermuteShape(rank);
3467 std::vector<unsigned int>::iterator it;
3468 for (unsigned int i = 0u; i < rank; ++i)
3469 {
3470 it = std::find(outputDims.begin(), outputDims.end(), i);
3471 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(outputDims.begin(), it));
3472 }
3473
Mike Kelly46272802019-08-14 17:00:48 +01003474 armnn::PermuteDescriptor permuteDesc;
James Conroy1bde8e32020-01-22 16:40:57 +00003475 permuteDesc.m_DimMappings = armnn::PermutationVector(armnnPermuteShape.data(), armnnPermuteShape.size());
Mike Kelly46272802019-08-14 17:00:48 +01003476
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003477 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003478 if (!output)
3479 {
3480 return Fail("%s: Could not read output 0", __func__);
3481 }
3482
3483 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003484 if (IsDynamicTensor(outputInfo))
3485 {
3486 return Fail("%s: Dynamic output tensors are not supported", __func__);
3487 }
3488
Mike Kelly46272802019-08-14 17:00:48 +01003489
3490 bool isSupported = false;
3491 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3492 IsPermuteSupported,
3493 data.m_Backends,
3494 isSupported,
3495 inputInfo,
3496 outputInfo,
3497 permuteDesc);
3498 if (!isSupported)
3499 {
3500 return false;
3501 }
3502
3503 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3504 assert(layer != nullptr);
3505 input.Connect(layer->GetInputSlot(0));
3506
3507 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3508}
3509
3510template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003511 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003512 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003513 typename HalModel = typename HalPolicy::Model>
3514bool ConvertBatchToSpaceNd(const HalOperation& operation,
3515 const HalModel& model,
3516 ConversionData& data)
3517{
Finn Williams23b87b32019-07-30 11:44:05 +01003518
3519 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3520 if (!input.IsValid())
3521 {
3522 return Fail("%s: Operation has invalid inputs", __func__);
3523 }
3524
3525 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3526 if (!output)
3527 {
3528 return Fail("%s: Could not read output 0", __func__);
3529 }
3530
3531 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3532 if (IsDynamicTensor(outputInfo))
3533 {
3534 return Fail("%s: Dynamic output tensors are not supported", __func__);
3535 }
3536
3537 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3538 if (!blockOperand)
3539 {
3540 return Fail("%s: Could not read input 1", __func__);
3541 }
3542
3543 // Convert the block operand to int32
3544 std::vector<int32_t> block;
3545 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3546 {
3547 return Fail("%s: Input 1 has invalid values", __func__);
3548 }
3549
3550 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3551
3552 unsigned int rank = inputInfo.GetNumDimensions();
3553 if (rank != 4)
3554 {
3555 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3556 }
3557
3558 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3559 {
3560 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3561 " greater than or equal to 1", __func__);
3562 }
3563
3564 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3565 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3566 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3567
3568 if (Is12Operand(*output))
3569 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003570 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003571 }
3572 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3573 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3574
3575 bool isSupported = false;
3576 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3577 IsBatchToSpaceNdSupported,
3578 data.m_Backends,
3579 isSupported,
3580 inputInfo,
3581 outputInfo,
3582 batchToSpaceNdDesc);
3583 if (!isSupported)
3584 {
3585 return false;
3586 }
3587
3588 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3589 assert(layer != nullptr);
3590 input.Connect(layer->GetInputSlot(0));
3591
3592 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3593}
Mike Kelly0a879362019-07-29 16:56:31 +01003594
Finn Williamsd74c5052019-07-30 17:06:00 +01003595template<typename HalPolicy,
3596 typename HalOperation = typename HalPolicy::Operation,
3597 typename HalOperand = typename HalPolicy::Operand,
3598 typename HalModel = typename HalPolicy::Model>
3599bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3600{
3601 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3602 if (!input.IsValid())
3603 {
3604 return Fail("%s: Operation has invalid inputs", __func__);
3605 }
3606
3607 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3608 unsigned int rank = inputInfo.GetNumDimensions();
3609 unsigned int spatialDim = rank - 2;
3610
3611 if (rank != 4)
3612 {
3613 Fail("%s: Only inputs with rank 4 are supported", __func__);
3614 }
3615
3616 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3617 if (!output)
3618 {
3619 return Fail("%s: Could not read output 0", __func__);
3620 }
3621
3622 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3623 if (IsDynamicTensor(outputInfo))
3624 {
3625 return Fail("%s: Dynamic output tensors are not supported", __func__);
3626 }
3627
3628 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3629 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3630
3631 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3632 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3633 {
3634 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3635 }
3636
3637 std::vector<int32_t> blockShape;
3638 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3639 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3640 {
3641 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3642 }
3643
3644 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3645 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3646 {
3647 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3648 }
3649
3650 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3651 std::vector<int32_t> paddings;
3652 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3653 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3654 {
3655 int paddingBeforeInput = paddings[i];
3656 int paddingAfterInput = paddings[i + 1];
3657 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3658 {
3659 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3660 }
3661
3662 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3663 }
3664
3665 armnn::SpaceToBatchNdDescriptor descriptor;
3666 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3667 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3668 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3669
3670 if (Is12Operand(*output))
3671 {
3672 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3673 }
3674
3675 bool isSupported = false;
3676 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3677 IsSpaceToBatchNdSupported,
3678 data.m_Backends,
3679 isSupported,
3680 inputInfo,
3681 outputInfo,
3682 descriptor);
3683 if (!isSupported)
3684 {
3685 return false;
3686 }
3687
3688 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3689 assert(layer != nullptr);
3690 input.Connect(layer->GetInputSlot(0));
3691
3692 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3693}
3694
saoste01b8471482018-10-10 09:44:51 +01003695} // namespace armnn_driver