blob: 5c6238e2e839a751db62e89ecadce3571bee95c3 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
15#include <armnnUtils/Permute.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
/// Bundle of state shared by all conversion steps: the target backends,
/// the network being built, the output slots produced so far (indexed by
/// operand), and the memory pools backing constant/input data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends to query for layer support, in priority order.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Output slot for each model operand already converted (indexed by operand id).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools referenced by CONSTANT_REFERENCE operands.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
/// Lightweight handle pairing an ArmNN output slot with its tensor info,
/// used to connect a converted operand into a downstream layer's input slot.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when the handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
/// Holds a ConstTensor referencing either model pool memory or (if the data
/// needed permuting) an internally-owned swizzled copy.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation to apply to the data; identity leaves the pool memory referenced as-is.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: copying would alias or duplicate the owned swizzled buffer.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// The format string and arguments are forwarded verbatim to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Queries each backend in order and stops at the first one reporting support,
// writing the result into the 'supported' lvalue. Backends that are not
// registered, or that reject the layer, are logged and skipped.
// NOTE(review): the macro reads 'supported' at the end without initialising it
// itself - callers must pass a variable initialised to false.
// An InvalidArgumentException from the support check is re-thrown with added context.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000169template<typename HalOperand>
170armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
198
// The V1_0 HAL has no BOOL operand type, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
203
// A V1_0 operand is by definition not a 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
208
#ifdef ARMNN_ANDROID_NN_V1_2

// True when the 1.2 operand holds a scalar BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
223
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100224template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000225armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
226 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100227 armnn::TensorInfo reshapeInfo)
228{
229 armnn::ReshapeDescriptor reshapeDescriptor;
230 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
231
232 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
233 BOOST_ASSERT(reshapeLayer != nullptr);
234
235 // Attach the input layer to the reshape layer
236 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
237 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
238
239 return *reshapeLayer;
240}
241
/// Connects two inputs to a binary layer, inserting a reshape in front of the
/// lower-rank input (prepending degenerate dimensions) so both inputs have the
/// same rank. Returns false if the required reshape is unsupported by all backends.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the small tensor's dimensions into the tail of a rank-maxInputDimensions
    // shape, leaving leading dimensions as 1.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
334
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000335void CalcPadding(uint32_t input,
336 uint32_t kernel,
337 uint32_t stride,
338 uint32_t& outPadHead,
339 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100340 android::nn::PaddingScheme scheme)
341{
342 int32_t padHead;
343 int32_t padTail;
344 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
345 outPadHead = boost::numeric_cast<uint32_t>(padHead);
346 outPadTail = boost::numeric_cast<uint32_t>(padTail);
347}
348
#ifdef ARMNN_ANDROID_NN_V1_2

/// Dilation-aware padding calculation used by the 1.2 driver.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t paddingHead;
    int32_t paddingTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &paddingHead, &paddingTail);
    outPadHead = boost::numeric_cast<uint32_t>(paddingHead);
    outPadTail = boost::numeric_cast<uint32_t>(paddingTail);
}

/// Padding calculation for transpose convolution; outputs remain signed.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
368
Matthew Bentham912b3622019-05-03 15:49:14 +0100369Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100370{
371 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100372 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100373 shape.dimensions = operand.dimensions;
374 shape.scale = operand.scale;
375 shape.offset = operand.zeroPoint;
376 return shape;
377}
378
#ifdef ARMNN_ANDROID_NN_V1_2

/// Translates a V1_2 HAL operand's metadata into an NNAPI Shape.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape operandShape;
    operandShape.type       = OperandType(operand.type);
    operandShape.dimensions = operand.dimensions;
    operandShape.scale      = operand.scale;
    operandShape.offset     = operand.zeroPoint;
    return operandShape;
}

#endif
392
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        // Rescale every per-axis weight scale by the input scale to obtain the bias scales.
        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Accept a mismatch of up to 1% and silently correct the bias scale to the expected value.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
432
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// NHWCToArmNN and ArmNNToNHWC are inverse permutations of each other.
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Self-inverse: swapping dims 1 and 2 twice restores the original order.
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// RotateTensorLeft and RotateTensorRight are inverse permutations of each other.
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
443
444template<typename OSlot>
445armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
446 const armnn::PermutationVector& mappings)
447{
448 // Add swizzle layer
449 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
450
451 BOOST_ASSERT(layer != nullptr);
452
453 // Connect input to swizzle layer
454 input.Connect(layer->GetInputSlot(0));
455
456 // Setup swizzled output
457 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
458 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
459
460 return *layer;
461}
462
463void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
464{
465 // Add swizzle layer
466 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
467 // Connect swizzled input to layer
468 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
469}
470
/// Appends a permute layer (ArmNNToNHWC) to output slot @p index of @p layer
/// and returns the new deswizzle layer.
armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}
477
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Swizzles the input into firstLayer and appends a deswizzle after lastLayer;
// returns the deswizzle layer terminating the sequence.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
487
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Single-layer convenience overload: the same layer is both first and last.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
494
495bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
496 const armnn::TensorShape & outputShape,
497 uint32_t concatDim)
498{
499 // Validate the output shape is correct given the input shapes (which have just been validated)
500 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
501 if (outputShape.GetNumDimensions() != numDimensions)
502 {
503 return Fail("%s: Output shape has wrong number of dimensions", __func__);
504 }
505
506 unsigned int outputSizeAlongConcatenatedDimension = 0;
507 for (unsigned int i = 0; i < inputShapes.size(); i++)
508 {
509 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
510 }
511
512 for (unsigned int i = 0; i < numDimensions; ++i)
513 {
514 if (i == concatDim)
515 {
516 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
517 {
518 return Fail(
519 "%s: Invalid output shape for dimension %d (%d != %d)",
520 __func__,
521 i,
522 outputShape[i],
523 outputSizeAlongConcatenatedDimension);
524 }
525 }
526 else
527 {
528 if (outputShape[i] != inputShapes[0][i])
529 {
530 return Fail("%s: Invalid output shape", __func__);
531 }
532 }
533 }
534
535 return true;
536}
537
538bool RequiresReshape(armnn::TensorShape & inputShape)
539{
540 return inputShape.GetNumDimensions() < 3;
541}
542
arovir01b0717b52018-09-05 17:03:25 +0100543void SwizzleInputs(armnn::INetwork& network,
544 std::vector<LayerInputHandle>& inputs,
545 std::vector<armnn::TensorShape>& inputShapes,
546 const armnn::PermutationVector& mapping)
547{
548 if (!mapping.IsEqual(IdentityPermutation4D))
549 {
550 size_t nInputs = inputs.size();
551 for (size_t i=0; i<nInputs; ++i)
552 {
553 // add swizzle layer
554 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
555 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
556 auto& outputInfo = outputSlot.GetTensorInfo();
557 // replace inputs with the swizzled ones
558 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
559 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
560 }
561 }
562}
563
/// Verifies that the permute implied by @p mapping is supported for every
/// input on at least one backend; if so, performs the swizzling via
/// SwizzleInputs. Identity mappings skip both the check and the swizzle.
/// @return false as soon as any input's permute is unsupported.
bool CheckReshapeSupported(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping,
                           const armnn::TensorInfo& outputInfo)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::PermuteDescriptor permuteDesc;
            permuteDesc.m_DimMappings = mapping;

            // NOTE(review): the same outputInfo is used as the permute output
            // for every input - verify this is intended for multi-input cases.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsPermuteSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputInfo,
                                       permuteDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
597
598
narpra01f176d5a2018-11-18 20:17:48 +0000599bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
600 int32_t & concatDimension,
601 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100602{
narpra01f176d5a2018-11-18 20:17:48 +0000603 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100604 BOOST_ASSERT(numberOfDimensions >= 3);
605
606 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000607 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
608 // or along dimension 0 or 2 for a 3-D tensor.
609 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100610 {
narpra01f176d5a2018-11-18 20:17:48 +0000611 concatDimension = 1;
612 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
613 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100614 }
narpra01f176d5a2018-11-18 20:17:48 +0000615 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100616 {
narpra01f176d5a2018-11-18 20:17:48 +0000617 concatDimension = 0;
618 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
619 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100620 }
narpra01f176d5a2018-11-18 20:17:48 +0000621 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100622}
623
624} // anonymous namespace
625
namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @param tensorInfo info set on the output of the activation layer
/// @param activation the AndroidNN activation function to apply
/// @param prevLayer the layer whose output feeds the activation
/// @param data shared conversion state (network, backends, pools)
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver
641
642///
643/// Utility templates
644///
645
646namespace armnn_driver
647{
648
649using namespace android::nn;
650
/// Looks up the operand behind input slot @p inputIndex of @p operation.
/// @return the operand, or nullptr if the index is out of range
///         (logging a failure unless @p failOnIndexOutOfBounds is false).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    // Reject out-of-range slot indices up front.
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    const uint32_t operandIndex = operation.inputs[inputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size());

    return &model.operands[operandIndex];
}
672
/// Looks up the operand behind output slot @p outputIndex of @p operation.
/// @return the operand, or nullptr (with a logged failure) if out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    const uint32_t operandIndex = operation.outputs[outputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size());

    return &model.operands[operandIndex];
}
692
/// Resolves a read-only pointer to the operand's constant data, either inside
/// model.operandValues (CONSTANT_COPY) or in a mapped memory pool
/// (CONSTANT_REFERENCE). Returns nullptr for an omitted optional operand, and
/// nullptr with a logged failure for any other lifetime.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with NO_VALUE is handled as invalid below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
739
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100740template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100741 typename HalOperation = typename HalPolicy::Operation,
742 typename HalModel = typename HalPolicy::Model,
743 typename HalOperandType = typename HalPolicy::OperandType>
744bool GetOperandType(const HalOperation& operation,
745 uint32_t inputIndex,
746 const HalModel& model,
747 HalOperandType& type)
748{
749 using HalOperand = typename HalPolicy::Operand;
750
751 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
752 if (!operand)
753 {
754 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
755 }
756
757 type = operand->type;
758 return true;
759}
760
/// True when the operand's data is fixed at model build time:
/// CONSTANT_COPY / CONSTANT_REFERENCE hold baked-in data, and NO_VALUE
/// (an omitted optional operand) is treated as constant as well.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
773
/// Pins a constant HAL operand's data as a ConstTensorPin, optionally permuting
/// it (dimensionMappings) and/or overriding its shape. An omitted optional
/// operand yields an invalid-but-optional pin; any other failure yields an
/// invalid pin with a logged reason.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Mandatory operands must have a constant lifetime to be pinned.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        // Remap the quantization axis through the same permutation applied to the data.
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
822
823template<typename HalPolicy,
824 typename HalOperation = typename HalPolicy::Operation,
825 typename HalModel = typename HalPolicy::Model>
826ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
827 uint32_t inputIndex,
828 const HalModel& model,
829 const ConversionData& data,
830 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
831 const armnn::TensorShape* overrideTensorShape = nullptr,
832 bool optional = false)
833{
834 using HalOperand = typename HalPolicy::Operand;
835
836 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
837 if (!operand)
838 {
839 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
840 return ConstTensorPin();
841 }
842 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
843 model,
844 data,
845 dimensionMappings,
846 overrideTensorShape,
847 optional);
848}
849
850template<typename HalPolicy,
851 typename OutputType,
852 typename HalOperandType = typename HalPolicy::OperandType,
853 typename HalOperation = typename HalPolicy::Operation,
854 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100855bool GetInputScalar(const HalOperation& operation,
856 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100857 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100858 OutputType& outValue,
859 const HalModel& model,
860 const ConversionData& data)
861{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100862 using HalOperand = typename HalPolicy::Operand;
863
864 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100865 if (!operand)
866 {
867 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
868 }
869
870 if (operand->type != type)
871 {
872 return Fail("%s: unexpected operand type: %s (should be %s)",
873 __func__, toString(operand->type).c_str(), toString(type).c_str());
874 }
875
876 if (operand->location.length != sizeof(OutputType))
877 {
878 return Fail("%s: incorrect operand location length: %i (should be %i)",
879 __func__, operand->location.length, sizeof(OutputType));
880 }
881
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100882 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100883 if (!valueAddress)
884 {
885 return Fail("%s: failed to get address for operand", __func__);
886 }
887
888 outValue = *(static_cast<const OutputType*>(valueAddress));
889 return true;
890}
891
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100892template<typename HalPolicy,
893 typename HalOperation = typename HalPolicy::Operation,
894 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100895bool GetInputInt32(const HalOperation& operation,
896 uint32_t inputIndex,
897 int32_t& outValue,
898 const HalModel& model,
899 const ConversionData& data)
900{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100901 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100902}
903
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100904template<typename HalPolicy,
905 typename HalOperation = typename HalPolicy::Operation,
906 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100907bool GetInputFloat32(const HalOperation& operation,
908 uint32_t inputIndex,
909 float& outValue,
910 const HalModel& model,
911 const ConversionData& data)
912{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100913 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100914}
915
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100916template<typename HalPolicy,
917 typename HalOperation = typename HalPolicy::Operation,
918 typename HalOperandType = typename HalPolicy::OperandType,
919 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100920bool GetInputActivationFunctionImpl(const HalOperation& operation,
921 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100922 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100923 ActivationFn& outActivationFunction,
924 const HalModel& model,
925 const ConversionData& data)
926{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100927 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100928 {
929 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
930 __func__,
931 toString(type).c_str(),
932 toString(OperandType::INT32).c_str(),
933 toString(OperandType::TENSOR_INT32).c_str());
934 }
935
936 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100937 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100938 {
939 return Fail("%s: failed to get activation input value", __func__);
940 }
941 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
942 return true;
943}
944
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100945template<typename HalPolicy,
946 typename HalOperation = typename HalPolicy::Operation,
947 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100948bool GetInputActivationFunction(const HalOperation& operation,
949 uint32_t inputIndex,
950 ActivationFn& outActivationFunction,
951 const HalModel& model,
952 const ConversionData& data)
953{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100954 return GetInputActivationFunctionImpl<HalPolicy>(operation,
955 inputIndex,
956 HalPolicy::OperandType::INT32,
957 outActivationFunction,
958 model,
959 data);
arovir01b0717b52018-09-05 17:03:25 +0100960}
961
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Reads a fused activation function for operations that supply it as a
// tensor input rather than a scalar.
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the name and the comment above, this passes
    // OperandType::INT32 (not TENSOR_INT32), so it behaves identically to
    // GetInputActivationFunction and would reject an actual tensor operand —
    // confirm whether TENSOR_INT32 was intended here.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
979
980
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981template<typename HalPolicy,
982 typename HalOperation = typename HalPolicy::Operation,
983 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100984bool GetOptionalInputActivation(const HalOperation& operation,
985 uint32_t inputIndex,
986 ActivationFn& activationFunction,
987 const HalModel& model,
988 const ConversionData& data)
989{
990 if (operation.inputs.size() <= inputIndex)
991 {
992 activationFunction = ActivationFn::kActivationNone;
993 }
994 else
995 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100996 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100997 {
998 return Fail("%s: Operation has invalid inputs", __func__);
999 }
1000 }
1001 return true;
1002}
1003
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001004template<typename HalPolicy,
1005 typename ConvolutionDescriptor,
1006 typename HalOperation = typename HalPolicy::Operation,
1007 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001008bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1009 uint32_t dilationXIndex,
1010 ConvolutionDescriptor& descriptor,
1011 const HalModel& model,
1012 const ConversionData& data)
1013{
1014 bool success = true;
1015 if (operation.inputs.size() >= dilationXIndex + 2)
1016 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001017 success &= GetInputScalar<HalPolicy>(operation,
1018 dilationXIndex,
1019 HalPolicy::OperandType::INT32,
1020 descriptor.m_DilationX,
1021 model,
1022 data);
1023 success &= GetInputScalar<HalPolicy>(operation,
1024 dilationXIndex + 1,
1025 HalPolicy::OperandType::INT32,
1026 descriptor.m_DilationY,
1027 model,
1028 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001029 }
1030
1031 return success;
1032}
1033
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001034template<typename HalPolicy,
1035 typename HalOperand = typename HalPolicy::Operand,
1036 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001037bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001038 std::vector<int32_t>& outValues,
1039 const HalModel& model,
1040 const ConversionData& data)
1041{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001042 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001043 {
1044 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1045 }
1046
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001047 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001048 if (!startAddress)
1049 {
1050 return Fail("%s: failed to get operand address", __func__, operand.type);
1051 }
1052
1053 // Check number of bytes is sensible
1054 const uint32_t numBytes = operand.location.length;
1055 if (numBytes % sizeof(int32_t) != 0)
1056 {
1057 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1058 __func__, numBytes, sizeof(int32_t));
1059 }
1060
1061 outValues.resize(numBytes / sizeof(int32_t));
1062 memcpy(outValues.data(), startAddress, numBytes);
1063 return true;
1064}
1065
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001066template<typename HalPolicy,
1067 typename HalOperation = typename HalPolicy::Operation,
1068 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001069bool GetInputPaddingScheme(const HalOperation& operation,
1070 uint32_t inputIndex,
1071 PaddingScheme& outPaddingScheme,
1072 const HalModel& model,
1073 const ConversionData& data)
1074{
1075 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001076 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001077 {
1078 return Fail("%s: failed to get padding scheme input value", __func__);
1079 }
1080
1081 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1082 return true;
1083}
1084
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves the operand feeding input slot 'inputIndex' of 'operation' into a
// LayerInputHandle wrapping an ArmNN output slot:
// - model inputs / temporaries / model outputs map to the slot already
//   recorded in data.m_OutputSlotForOperand;
// - constant operands are materialised as a new ArmNN Constant layer;
// - anything else (missing operand, unsupported type, dynamic shape,
//   unsupported lifetime) yields an invalid handle after logging via Fail.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1197
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001198template<typename HalPolicy,
1199 typename HalOperation = typename HalPolicy::Operation,
1200 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001201bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1202 uint32_t operationOutputIndex,
1203 armnn::IConnectableLayer& layer,
1204 uint32_t layerOutputIndex,
1205 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001206 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001207{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001208 using HalOperand = typename HalPolicy::Operand;
1209
1210 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001211 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1212 {
1213 return false;
1214 }
1215
1216 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1217
1218 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1219 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1220
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001221 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001222
1223 return true;
1224}
1225
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001226template<typename HalPolicy,
1227 typename HalOperation = typename HalPolicy::Operation,
1228 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001229armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1230 uint32_t inputIndex,
1231 const HalModel& model,
1232 ConversionData& data)
1233{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001234 using HalOperand = typename HalPolicy::Operand;
1235
1236 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001237 if (!operand)
1238 {
1239 return armnn::DataLayout::NHWC;
1240 }
1241
1242 if (!IsBool(*operand))
1243 {
1244 return armnn::DataLayout::NHWC;
1245 }
1246
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001247 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001248 if (!valueAddress)
1249 {
1250 return armnn::DataLayout::NHWC;
1251 }
1252
1253 if (*(static_cast<const bool*>(valueAddress)))
1254 {
1255 return armnn::DataLayout::NCHW;
1256 }
1257 else
1258 {
1259 return armnn::DataLayout::NHWC;
1260 }
1261}
1262
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001263template<typename HalPolicy,
1264 typename HalOperation = typename HalPolicy::Operation,
1265 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001266bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1267 uint32_t outputIndex,
1268 armnn::IConnectableLayer& layer,
1269 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001270 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001271{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001272 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1273 outputIndex,
1274 layer,
1275 outputIndex,
1276 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001277 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001278}
1279
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Shared conversion path for the activation operations (ReLu/ReLu1/ReLu6/
// TanH/...): validates input 0 and output 0, checks backend support for the
// given descriptor, adds an ArmNN Activation layer, and registers its output
// slot for the operation's output operand.
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask each configured backend whether it supports this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1328
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001329template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001330 typename HalOperation = typename HalPolicy::Operation,
1331 typename HalModel = typename HalPolicy::Model>
1332bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1333{
1334 armnn::ActivationDescriptor desc;
1335 desc.m_Function = armnn::ActivationFunction::ReLu;
1336
1337 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1338}
1339
1340template<typename HalPolicy,
1341 typename HalOperation = typename HalPolicy::Operation,
1342 typename HalModel = typename HalPolicy::Model>
1343bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1344{
1345 armnn::ActivationDescriptor desc;
1346 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1347 desc.m_A = 1.0f;
1348 desc.m_B = -1.0f;
1349
1350 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1351}
1352
1353template<typename HalPolicy,
1354 typename HalOperation = typename HalPolicy::Operation,
1355 typename HalModel = typename HalPolicy::Model>
1356bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1357{
1358 armnn::ActivationDescriptor desc;
1359 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1360 desc.m_A = 6.0f;
1361
1362 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1363}
1364
1365template<typename HalPolicy,
1366 typename HalOperation = typename HalPolicy::Operation,
1367 typename HalModel = typename HalPolicy::Model>
1368bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1369{
1370 armnn::ActivationDescriptor desc;
1371 desc.m_Function = armnn::ActivationFunction::TanH;
1372 desc.m_A = 1.0f; // android nn does not support tanH parameters
1373 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1374
1375 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1376}
1377
1378template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001379 typename HalOperation = typename HalPolicy::Operation,
1380 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001381bool ConvertPaddings(const HalOperation& operation,
1382 const HalModel& model,
1383 ConversionData& data,
1384 unsigned int rank,
1385 armnn::PadDescriptor& padDescriptor)
1386{
1387 using HalOperand = typename HalPolicy::Operand;
1388
1389 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1390 if (!paddingsOperand)
1391 {
1392 return Fail("%s: Could not read paddings operand", __func__);
1393 }
1394
1395 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1396 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1397 {
1398 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1399 }
1400
1401 std::vector<int32_t> paddings;
Mike Kelly789cf9a2020-02-18 10:03:30 +00001402 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1403 {
1404 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1405 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001406
1407 // add padding for each dimension of input tensor.
1408 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1409 {
1410 int paddingBeforeInput = paddings[i];
1411 int paddingAfterInput = paddings[i + 1];
1412
1413 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1414 {
1415 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1416 }
1417
1418 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1419 }
1420
1421 return true;
1422}
1423
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Shared conversion path for the pooling operations (average/max/L2).
// Supports both NNAPI parameter layouts: explicit padding (>= 10 inputs:
// pad l/r/t/b, strides, pool size, activation, optional layout) and implicit
// padding (padding scheme, strides, pool size, activation, optional layout),
// computing the explicit padding via CalcPadding in the latter case.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 operands may carry an optional data-layout operand at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Derive explicit padding from the implicit scheme and the input's
        // spatial dimensions (indices depend on the chosen data layout).
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1542
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001543template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001544 typename HalOperation = typename HalPolicy::Operation,
1545 typename HalModel = typename HalPolicy::Model>
1546bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001547{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001548 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001549
1550 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1551 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1552
1553 if (!input0.IsValid() || !input1.IsValid())
1554 {
1555 return Fail("%s: Operation has invalid inputs", __func__);
1556 }
1557
1558 // The FuseActivation parameter is always the input index 2
1559 // and it should be optional
1560 ActivationFn activationFunction;
1561 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1562 {
1563 return Fail("%s: Operation has invalid inputs", __func__);
1564 }
1565
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001566 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001567 if (!outputOperand)
1568 {
1569 return false;
1570 }
1571
1572 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1573 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1574
1575 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1576 if (IsDynamicTensor(outputInfo))
1577 {
1578 return Fail("%s: Dynamic output tensors are not supported", __func__);
1579 }
1580
1581 bool isSupported = false;
1582 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1583 IsAdditionSupported,
1584 data.m_Backends,
1585 isSupported,
1586 inputInfo0,
1587 inputInfo1,
1588 outputInfo);
1589 if (!isSupported)
1590 {
1591 return false;
1592 }
1593
1594 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1595 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1596
1597 if (endLayer != nullptr)
1598 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001599 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001600 if (!isReshapeSupported)
1601 {
1602 return false;
1603 }
1604
Mike Kelly46272802019-08-14 17:00:48 +01001605 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1606 }
1607 else
1608 {
1609 return Fail("%s: ProcessActivation failed", __func__);
1610 }
1611}
1612
1613template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001614 typename HalOperation = typename HalPolicy::Operation,
1615 typename HalModel = typename HalPolicy::Model>
1616bool ConvertArgMinMax(const HalOperation& operation,
1617 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001618 ConversionData& data,
1619 armnn::ArgMinMaxFunction argMinMaxFunction)
1620{
1621 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1622
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001623 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001624 using HalOperandType = typename HalPolicy::OperandType;
1625
1626 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1627
1628 if (!input0.IsValid())
1629 {
1630 return Fail("%s: Operation has invalid inputs", __func__);
1631 }
1632
1633 int32_t axis;
1634 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1635 {
1636 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1637 }
1638
1639 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1640 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1641
1642 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1643 {
1644 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1645 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1646 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1647 return Fail("%s: Axis must be in range [-n, n)", __func__);
1648 }
1649
1650 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1651 if (!output)
1652 {
1653 return Fail("%s: Could not read output 0", __func__);
1654 }
1655
1656 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1657
1658 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1659 if (IsDynamicTensor(outputInfo))
1660 {
1661 return Fail("%s: Dynamic output tensors are not supported", __func__);
1662 }
1663
1664 armnn::ArgMinMaxDescriptor descriptor;
1665 descriptor.m_Function = argMinMaxFunction;
1666 descriptor.m_Axis = axis;
1667
1668 bool isSupported = false;
1669 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1670 IsArgMinMaxSupported,
1671 data.m_Backends,
1672 isSupported,
1673 inputInfo0,
1674 outputInfo,
1675 descriptor);
1676 if (!isSupported)
1677 {
1678 return false;
1679 }
1680
1681 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1682 assert(layer != nullptr);
1683
1684 input0.Connect(layer->GetInputSlot(0));
1685
1686 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1687}
1688
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an NN CONCATENATION operation into an ArmNN Concat layer, inserting
// Reshape and Permute layers around it where required:
//  - inputs of rank < 3 are expanded to rank 3 before concatenation and the
//    output is reshaped back afterwards;
//  - when the concat axis is one ArmNN cannot concatenate on directly, the
//    inputs are swizzled, concatenated, then deswizzled.
// Returns true on success, false (via Fail) otherwise.
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The concat axis is the last input (index numInputTensors).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // Taken by value: both outputInfo and outputShape are mutated below as
    // reshapes/permutes change the effective output.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Set when any input needed expanding to rank 3; tensorDimensionsAdded
    // records how many leading 1-dims were added (assumes all reshaped inputs
    // get the same count, since ValidateConcatOutputShape requires equal ranks).
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // Check backend support for the expansion reshape before adding it.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    // permutationPair.first swizzles inputs before the concat;
    // permutationPair.second deswizzles the concat output.
    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        // CreateDescriptorForConcatenation throws on invalid shape combinations.
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the support check.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        armnn::PermuteDescriptor permuteDesc;
        permuteDesc.m_DimMappings = permutationPair.second;

        // Check backend support for the deswizzle permute before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPermuteSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   outputInfo,
                                   permuteDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // Check backend support for the restoring reshape before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1976
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an NN CONV_2D operation into an ArmNN Convolution2d layer (NHWC).
// Supports the explicit-padding form (10 inputs) and the implicit-padding form
// (7 inputs, padding computed via CalcPadding). Weights and bias (inputs 1/2)
// must be constant tensors. Returns true on success.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: pad left/right/top/bottom, strides, fused activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding: a padding scheme (SAME/VALID), strides, fused activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel size read from the weight tensor; the indices assume the NN
        // filter layout [ depth_out, H, W, depth_in ] and NHWC input.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2096
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002097template<typename HalPolicy,
2098 typename HalOperation = typename HalPolicy::Operation,
2099 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002100bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2101{
2102 using HalOperand = typename HalPolicy::Operand;
2103 using HalOperandType = typename HalPolicy::OperandType;
2104
2105 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2106 if (!input.IsValid() )
2107 {
2108 return Fail("%s: Operation has invalid inputs", __func__);
2109 }
2110
2111 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2112 unsigned int rank = inputInfo.GetNumDimensions();
2113 if (rank != 4)
2114 {
2115 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2116 }
2117
2118 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2119 if (!output)
2120 {
2121 return Fail("%s: Could not read output 0", __func__);
2122 }
2123
2124 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2125 if (IsDynamicTensor(outputInfo))
2126 {
2127 return Fail("%s: Dynamic output tensors are not supported", __func__);
2128 }
2129
2130 armnn::DepthToSpaceDescriptor descriptor;
2131
2132 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2133 if (descriptor.m_BlockSize <= 1)
2134 {
2135 return Fail("%s: Block size must be at least 1 in all dimensions");
2136 }
2137
2138 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2139 if (Is12Operand(*output))
2140 {
2141 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2142 }
2143
2144 bool isSupported = false;
2145 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2146 IsDepthToSpaceSupported,
2147 data.m_Backends,
2148 isSupported,
2149 inputInfo,
2150 outputInfo,
2151 descriptor);
2152 if (!isSupported)
2153 {
2154 return false;
2155 }
2156
2157 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2158 assert(layer != nullptr);
2159 input.Connect(layer->GetInputSlot(0));
2160
2161 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2162}
2163
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer (NHWC). Supports the explicit-padding form
// (11 inputs) and the implicit-padding form (8 inputs). The constant weight
// tensor is reinterpreted and permuted into ArmNN's expected layout.
// Returns true on success.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I = input channels from the NHWC input's last dim; M = channel multiplier.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding: pad left/right/top/bottom, strides, fused activation.
        // NOTE(review): input 9 (the channel multiplier in the NN spec) is not
        // read here; the multiplier is derived from the shapes above instead.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding: a padding scheme (SAME/VALID), strides, fused activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel size from the swizzled [ M, I, H, W ] weight tensor.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2308
/// Converts an Android NN DEQUANTIZE operation into an ArmNN Dequantize layer.
/// Validates input/output operands, rejects unsupported configurations, queries backend
/// support and, on success, adds the layer to data.m_Network and registers its output slot.
/// @return true on successful conversion, false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    // Per-axis quantization is only supported along dimension 0 here.
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    // Output shape must be fully known at conversion time.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the selected backends whether they can execute this layer configuration.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDequantizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Record the layer's output slot so later operations can consume this operand.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2359
/// Converts an Android NN DIV operation into an ArmNN Division layer, handling the
/// optional fused activation (input index 2) and broadcasting between the two inputs.
/// @return true on successful conversion, false otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDivisionSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    // ProcessActivation may append an activation layer; endLayer is the last layer of the chain.
    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        // BroadcastTensor connects both inputs, inserting reshape layers when their ranks differ.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    return Fail("%s: ProcessActivation failed", __func__);
}
2423
2424template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002425 typename HalOperation = typename HalPolicy::Operation,
2426 typename HalModel = typename HalPolicy::Model>
2427bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002428{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002429 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002430
2431 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2432 if (!input.IsValid())
2433 {
2434 return Fail("%s: Operation has invalid inputs", __func__);
2435 }
2436
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002437 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002438 if (!outputOperand)
2439 {
2440 return Fail("%s: Operation has invalid outputs", __func__);
2441 }
2442
2443 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2444 if (IsDynamicTensor(outputInfo))
2445 {
2446 return Fail("%s: Dynamic output tensors are not supported", __func__);
2447 }
2448
2449 bool isSupported = false;
2450 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2451 IsFloorSupported,
2452 data.m_Backends,
2453 isSupported,
2454 input.GetTensorInfo(),
2455 outputInfo);
2456 if (!isSupported)
2457 {
2458 return false;
2459 }
2460
2461 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2462 assert(layer != nullptr);
2463 input.Connect(layer->GetInputSlot(0));
2464
2465 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2466}
2467
// HAL 1.0 has no symmetric 8-bit quantized tensor type, so a V1_0 operand is never QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2472
#ifdef ARMNN_ANDROID_NN_V1_2

// From HAL 1.2 onwards an operand may be of type TENSOR_QUANT8_SYMM (symmetric 8-bit quantized).
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2481
// Outcome of DequantizeIfRequired (see below).
enum class DequantizeStatus
{
    SUCCESS,         // Weights were dequantized into a newly allocated float buffer.
    NOT_REQUIRED,    // Weights are already constant, or no matching DEQUANTIZE producer was found.
    INVALID_OPERAND  // The weights operand could not be read from the model.
};
2488
// Tuple of (dequantized buffer, buffer size in bytes, tensor info for the buffer, status).
// Buffer and size are only meaningful when status == DequantizeStatus::SUCCESS.
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2490
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002491template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002492 typename HalOperation = typename HalPolicy::Operation,
2493 typename HalModel = typename HalPolicy::Model>
2494DequantizeResult DequantizeIfRequired(size_t operand_index,
2495 const HalOperation& operation,
2496 const HalModel& model,
2497 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002498{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002499 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002500
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002501 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002502 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002503 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002504 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002505 }
2506
2507 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2508 {
2509 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002510 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002511 }
2512
2513 const size_t weightsInputIndex = operation.inputs[operand_index];
2514
2515 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2516 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2517 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2518 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002519 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002520 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002521 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2522 {
2523 continue;
2524 }
2525
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002526 size_t outOpIndex = weightsInputIndex + 1;
2527 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002528 {
2529 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002530 }
2531
2532 if (outOpIndex != weightsInputIndex)
2533 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002534 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002535 }
2536
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002537 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002538 BOOST_ASSERT(operand);
2539
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002540 if (!IsQSymm8(*operand))
2541 {
2542 // Only supporting dequantize from QSYMM8 to FLOAT
2543 break;
2544 }
2545
2546 // Allocate a new buffer for the dequantized data and manually dequantize
2547 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2548 if (!startValue)
2549 {
2550 // Failed to get the operand address
2551 break;
2552 }
2553
2554 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2555 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002556 const float quantizationScale = operand->scale;
2557
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002558 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2559 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2560 {
2561 float* dstPtr = dequantizedBuffer.get();
2562 BOOST_ASSERT(dstPtr);
2563 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2564 }
2565
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002566 // Construct tensor info for dequantized ConstTensor
2567 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2568 operand->dimensions.data(),
2569 armnn::DataType::Float32);
2570
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002571 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2572 std::move(tensorInfo),
2573 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002574 }
2575
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002576 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002577}
2578
2579template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002580 typename HalOperation = typename HalPolicy::Operation,
2581 typename HalModel = typename HalPolicy::Model>
2582ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2583 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002584 const ConversionData& data,
2585 size_t operandIndex,
2586 bool optional = false)
2587{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002588 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2589
2590 DequantizeStatus status = std::get<3>(dequantized);
2591 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002592 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002593 case DequantizeStatus::INVALID_OPERAND:
2594 {
2595 // return invalid const tensor pin
2596 return ConstTensorPin();
2597 }
2598 case DequantizeStatus::NOT_REQUIRED:
2599 {
2600 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2601 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2602 }
2603 case DequantizeStatus::SUCCESS:
2604 default:
2605 {
2606 return ConstTensorPin(
2607 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2608 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002609 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002610}
2611
2612
/// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
/// Weights (input 1) may be dequantized from QSYMM8 if produced by a DEQUANTIZE op; the
/// input tensor is flattened to 2D via a Reshape layer when its rank is greater than 2.
/// @return true on successful conversion, false otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights (input 1) may need dequantizing first; bias (input 2) is a plain 1D constant.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FullyConnected expects a 2D input; compute the flattened shape (may throw on mismatch).
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Cross-check that flattened input x weights actually yields the declared output shape.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape layer to flatten the input down to 2D before the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2731
/// Converts an Android NN L2_NORMALIZATION operation into an ArmNN L2Normalization layer.
/// Only the no-optional-inputs form (rank-4 NHWC tensors) is supported.
/// @return true on successful conversion, false otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    // The optional axis input (HAL 1.2+) is not handled here.
    if (operation.inputs.size() != 1)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2790
/// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
/// Normalization layer (across-channel, local-brightness method, NHWC, rank-4 only).
/// @return true on successful conversion, false otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // Only the 5-input form (input, radius, bias, alpha, beta) is handled; the optional
    // axis input (HAL 1.2+) is not supported.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius (input 1), bias K (input 2), alpha (input 3) and beta (input 4).
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2868
2869template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002870 typename HalOperation = typename HalPolicy::Operation,
2871 typename HalModel = typename HalPolicy::Model>
2872bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002873{
Mike Kelly46272802019-08-14 17:00:48 +01002874 armnn::ActivationDescriptor desc;
2875 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2876
2877 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2878}
2879
/// Converts an Android NN MEAN operation into an ArmNN Mean layer.
/// Input 1 is the axis tensor (may contain negative/duplicate axes, both normalized here);
/// input 2 is the "keep dims" flag.
/// @return true on successful conversion, false otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank wraps negative axis values into [0, rank).
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2956
2957template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002958 typename HalOperation = typename HalPolicy::Operation,
2959 typename HalModel = typename HalPolicy::Model>
2960bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002961{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002962 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002963
2964 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2965 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2966
2967 if (!input0.IsValid() || !input1.IsValid())
2968 {
2969 return Fail("%s: Operation has invalid inputs", __func__);
2970 }
2971
2972 // The FuseActivation parameter is always the input index 2
2973 // and it should be optional
2974 ActivationFn activationFunction;
2975 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2976 {
2977 return Fail("%s: Operation has invalid inputs", __func__);
2978 }
2979
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002980 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002981
2982 if (outputOperand == nullptr)
2983 {
2984 return false;
2985 }
2986
2987 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2988 if (IsDynamicTensor(outputInfo))
2989 {
2990 return Fail("%s: Dynamic output tensors are not supported", __func__);
2991 }
2992
2993 bool isSupported = false;
2994 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2995 IsMultiplicationSupported,
2996 data.m_Backends,
2997 isSupported,
2998 input0.GetTensorInfo(),
2999 input1.GetTensorInfo(),
3000 outputInfo);
3001 if (!isSupported)
3002 {
3003 return false;
3004 }
3005
3006 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3007 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3008
3009 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3010 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3011
3012 if (endLayer != nullptr)
3013 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003014 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003015 if (!isReshapeSupported)
3016 {
3017 return false;
3018 }
3019
Mike Kelly46272802019-08-14 17:00:48 +01003020 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3021 }
3022 else
3023 {
3024 return Fail("%s: ProcessActivation failed", __func__);
3025 }
3026}
3027
3028template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003029 typename HalOperation = typename HalPolicy::Operation,
3030 typename HalModel = typename HalPolicy::Model>
3031bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003032{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003033 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003034
Mike Kelly3c673942019-07-25 09:26:06 +01003035 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3036 if (!input.IsValid())
3037 {
3038 return Fail("%s: Operation has invalid inputs", __func__);
3039 }
3040
3041 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3042 unsigned int rank = inputInfo.GetNumDimensions();
3043
3044 armnn::PadDescriptor descriptor;
3045 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3046 {
3047 return Fail("%s: Could not convert paddings", __func__);
3048 }
3049
3050 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3051 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3052 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Derek Lamberti1a38cda2020-01-10 17:28:20 +00003053 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
Mike Kelly3c673942019-07-25 09:26:06 +01003054 {
3055 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3056 }
3057
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003058 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003059 if (!output)
3060 {
3061 return Fail("%s: Could not read output", __func__);
3062 }
3063
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003064 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003065 if (IsDynamicTensor(outputInfo))
3066 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003067 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003068 }
3069
3070 bool isSupported = false;
3071 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3072 IsPadSupported,
3073 data.m_Backends,
3074 isSupported,
3075 inputInfo,
3076 outputInfo,
3077 descriptor);
3078 if (!isSupported)
3079 {
3080 return false;
3081 }
3082
3083 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3084 assert(layer != nullptr);
3085 input.Connect(layer->GetInputSlot(0));
3086 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3087
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003088 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003089}
3090
Mike Kelly0a879362019-07-29 16:56:31 +01003091template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003092 typename HalOperation = typename HalPolicy::Operation,
3093 typename HalModel = typename HalPolicy::Model>
3094bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003095{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003096 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003097
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003098 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3099 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3100 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003101
3102 if (inputOperand == nullptr
3103 || requestedShapeOperand == nullptr
3104 || outputOperand == nullptr)
3105 {
3106 return Fail("%s: Operation has invalid inputs", __func__);
3107 }
3108
3109 if (requestedShapeOperand->dimensions.size() != 1)
3110 {
3111 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3112 __func__, requestedShapeOperand->dimensions.size());
3113 }
3114
3115 std::vector<int32_t> targetDimensions;
3116 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3117 {
3118 return Fail("%s: Could not read values of input 1", __func__);
3119 }
3120
3121 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3122
3123 Shape requestedShape;
3124 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3125 // function that resolves these values into a fully specified tensor shape.
3126 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3127 {
3128 return Fail("%s: Failed to resolve the requested shape", __func__);
3129 }
3130
3131 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3132 if (!SameShape(requestedShape, outputOperandShape))
3133 {
3134 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3135 }
3136
3137 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3138 if (!input.IsValid())
3139 {
3140 return Fail("%s: Could not read input 0", __func__);
3141 }
3142
3143 armnn::ReshapeDescriptor reshapeDescriptor;
3144 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3145 requestedShape.dimensions.data());
3146
3147 bool isSupported = false;
3148 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3149 IsReshapeSupported,
3150 data.m_Backends,
3151 isSupported,
3152 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003153 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003154 reshapeDescriptor);
3155 if (!isSupported)
3156 {
3157 return false;
3158 }
3159
3160 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3161 assert(layer != nullptr);
3162 input.Connect(layer->GetInputSlot(0));
3163
3164 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3165}
3166
3167template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003168 typename HalOperation = typename HalPolicy::Operation,
3169 typename HalModel = typename HalPolicy::Model>
3170bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003171{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003172 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003173
Mike Kelly0a879362019-07-29 16:56:31 +01003174 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3175 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3176
3177 if (!input0.IsValid() || !input1.IsValid())
3178 {
3179 return Fail("%s: Operation has invalid inputs", __func__);
3180 }
3181
3182 // The FuseActivation parameter is always the input index 2
3183 // and it should be optional
3184 ActivationFn activationFunction;
3185 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3186 {
3187 return Fail("%s: Operation has invalid inputs", __func__);
3188 }
3189
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003190 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003191 if (!output)
3192 {
3193 return Fail("%s: Could not read output 0", __func__);
3194 }
3195
3196 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3197 if (IsDynamicTensor(outputInfo))
3198 {
3199 return Fail("%s: Dynamic output tensors are not supported", __func__);
3200 }
3201
3202 bool isSupported = false;
3203 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3204 IsSubtractionSupported,
3205 data.m_Backends,
3206 isSupported,
3207 input0.GetTensorInfo(),
3208 input1.GetTensorInfo(),
3209 outputInfo);
3210 if (!isSupported)
3211 {
3212 return false;
3213 }
3214
3215 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3216 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3217
3218 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3219 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3220
3221 if (endLayer)
3222 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003223 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003224 if (!isReshapeSupported)
3225 {
3226 return false;
3227 }
Mike Kelly0a879362019-07-29 16:56:31 +01003228 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3229 }
3230
3231 return Fail("%s: ProcessActivation failed", __func__);
3232}
3233
Finn Williams23b87b32019-07-30 11:44:05 +01003234template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003235 typename HalOperation = typename HalPolicy::Operation,
3236 typename HalModel = typename HalPolicy::Model>
3237bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003238{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003239 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003240
3241 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3242 if (!input.IsValid())
3243 {
3244 return Fail("%s: Operation has invalid inputs", __func__);
3245 }
3246
3247 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3248 unsigned int rank = inputInfo.GetNumDimensions();
3249 if (rank > 4)
3250 {
3251 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3252 }
3253
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003254 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003255 if (!output)
3256 {
3257 return Fail("%s: Could not read output 0", __func__);
3258 }
3259
3260 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3261 {
3262 return Fail("%s: Dynamic output tensors are not supported", __func__);
3263 }
3264
3265 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3266 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003267 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003268
3269 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3270
3271 std::vector<int32_t> axis;
3272 if (!axisOperand)
3273 {
3274 axis.assign(dimensionSequence,
3275 dimensionSequence + rank);
3276 }
Mike Kelly789cf9a2020-02-18 10:03:30 +00003277 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003278 {
Mike Kelly789cf9a2020-02-18 10:03:30 +00003279 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003280 }
3281
3282 std::vector<uint32_t> outputDims;
3283 for (unsigned int i = 0; i < rank; i++)
3284 {
3285 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3286 auto currentDimension = inputInfo.GetShape()[i];
3287 if (skipSqueeze || currentDimension != 1)
3288 {
3289 outputDims.push_back(currentDimension);
3290 }
3291 }
3292
3293 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3294
3295 armnn::TensorInfo outputInfo = inputInfo;
3296 outputInfo.SetShape(outShape);
3297
3298 armnn::ReshapeDescriptor reshapeDesc;
3299 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3300
3301 bool isSupported = false;
3302 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3303 IsReshapeSupported,
3304 data.m_Backends,
3305 isSupported,
3306 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003307 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003308 reshapeDesc);
3309 if (!isSupported)
3310 {
3311 return false;
3312 }
3313
3314 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3315 assert(layer != nullptr);
3316 input.Connect(layer->GetInputSlot(0));
3317
3318 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3319}
3320
3321template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003322 typename HalOperation = typename HalPolicy::Operation,
3323 typename HalModel = typename HalPolicy::Model>
3324bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003325{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003326 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003327
3328 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3329 if (!input.IsValid())
3330 {
3331 return Fail("%s: Operation has invalid inputs", __func__);
3332 }
3333
3334 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3335 unsigned int rank = inputInfo.GetNumDimensions();
3336 if (rank > 4)
3337 {
3338 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3339 }
3340
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003341 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003342 if (!output)
3343 {
3344 return Fail("%s: Could not read output 0", __func__);
3345 }
3346
3347 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3348 if (IsDynamicTensor(outputInfo))
3349 {
3350 return Fail("%s: Dynamic output tensors are not supported", __func__);
3351 }
3352
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003353 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3354 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3355 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003356
3357 std::vector<int32_t> beginValues;
3358 std::vector<int32_t> endValues;
3359 std::vector<int32_t> stridesValues;
3360
3361 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003362 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003363 {
3364 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3365 {
3366 return false;
3367 }
3368
3369 if (operandValues.size() != rank)
3370 {
3371 return false;
3372 }
3373
3374 return true;
3375 };
3376
3377 if (!ValidateInputOperands(*beginOperand, beginValues)
3378 || !ValidateInputOperands(*endOperand, endValues)
3379 || !ValidateInputOperands(*stridesOperand, stridesValues))
3380 {
3381 return Fail("%s: Operation has invalid input operand", __func__);
3382 }
3383
3384 // Stride cannot have value '0'
3385 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3386 {
3387 return Fail("%s: Stride must be non-zero value.", __func__);
3388 }
3389
3390 armnn::StridedSliceDescriptor descriptor;
3391 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3392 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3393 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3394 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3395
3396 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3397 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3398 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3399 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3400 {
3401 return Fail("%s: Operation has invalid inputs", __func__);
3402 }
3403
3404 bool isSupported = false;
3405 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3406 IsStridedSliceSupported,
3407 data.m_Backends,
3408 isSupported,
3409 inputInfo,
3410 outputInfo,
3411 descriptor);
3412 if (!isSupported)
3413 {
3414 return false;
3415 }
3416
3417 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3418 assert(layer != nullptr);
3419 input.Connect(layer->GetInputSlot(0));
3420
3421 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3422}
3423
3424template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003425 typename HalOperation = typename HalPolicy::Operation,
3426 typename HalModel = typename HalPolicy::Model>
3427bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003428{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003429 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003430
3431 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3432 if (!input.IsValid())
3433 {
3434 return Fail("%s: Operation has invalid inputs", __func__);
3435 }
3436
3437 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3438 unsigned int rank = inputInfo.GetNumDimensions();
3439 if (rank > 4)
3440 {
3441 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3442 }
3443
3444 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3445 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003446 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003447
3448 std::vector<int32_t> perm(rank);
3449 if (!permOperand)
3450 {
3451 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3452 for (unsigned int i = rank; i > 0; i--)
3453 {
3454 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3455 }
3456 }
Mike Kelly789cf9a2020-02-18 10:03:30 +00003457 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003458 {
Mike Kelly789cf9a2020-02-18 10:03:30 +00003459 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003460 }
3461
3462 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3463
James Conroy1bde8e32020-01-22 16:40:57 +00003464 // Permutation vectors (outputDims) are given in ANN/Tf format, we must convert them to ArmNN format
3465 // For ANN/Tf/ACL: output[i] = input[ perm[i] ]
3466 // For ArmNN: output[ perm[i] ] = input[i]
3467 // e.g. 3,0,1,2 -> 1,2,3,0
3468 std::vector<unsigned int> armnnPermuteShape(rank);
3469 std::vector<unsigned int>::iterator it;
3470 for (unsigned int i = 0u; i < rank; ++i)
3471 {
3472 it = std::find(outputDims.begin(), outputDims.end(), i);
3473 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(outputDims.begin(), it));
3474 }
3475
Mike Kelly46272802019-08-14 17:00:48 +01003476 armnn::PermuteDescriptor permuteDesc;
James Conroy1bde8e32020-01-22 16:40:57 +00003477 permuteDesc.m_DimMappings = armnn::PermutationVector(armnnPermuteShape.data(), armnnPermuteShape.size());
Mike Kelly46272802019-08-14 17:00:48 +01003478
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003479 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003480 if (!output)
3481 {
3482 return Fail("%s: Could not read output 0", __func__);
3483 }
3484
3485 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003486 if (IsDynamicTensor(outputInfo))
3487 {
3488 return Fail("%s: Dynamic output tensors are not supported", __func__);
3489 }
3490
Mike Kelly46272802019-08-14 17:00:48 +01003491
3492 bool isSupported = false;
3493 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3494 IsPermuteSupported,
3495 data.m_Backends,
3496 isSupported,
3497 inputInfo,
3498 outputInfo,
3499 permuteDesc);
3500 if (!isSupported)
3501 {
3502 return false;
3503 }
3504
3505 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3506 assert(layer != nullptr);
3507 input.Connect(layer->GetInputSlot(0));
3508
3509 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3510}
3511
3512template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003513 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003514 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003515 typename HalModel = typename HalPolicy::Model>
3516bool ConvertBatchToSpaceNd(const HalOperation& operation,
3517 const HalModel& model,
3518 ConversionData& data)
3519{
Finn Williams23b87b32019-07-30 11:44:05 +01003520
3521 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3522 if (!input.IsValid())
3523 {
3524 return Fail("%s: Operation has invalid inputs", __func__);
3525 }
3526
3527 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3528 if (!output)
3529 {
3530 return Fail("%s: Could not read output 0", __func__);
3531 }
3532
3533 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3534 if (IsDynamicTensor(outputInfo))
3535 {
3536 return Fail("%s: Dynamic output tensors are not supported", __func__);
3537 }
3538
3539 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3540 if (!blockOperand)
3541 {
3542 return Fail("%s: Could not read input 1", __func__);
3543 }
3544
3545 // Convert the block operand to int32
3546 std::vector<int32_t> block;
3547 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3548 {
3549 return Fail("%s: Input 1 has invalid values", __func__);
3550 }
3551
3552 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3553
3554 unsigned int rank = inputInfo.GetNumDimensions();
3555 if (rank != 4)
3556 {
3557 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3558 }
3559
3560 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3561 {
3562 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3563 " greater than or equal to 1", __func__);
3564 }
3565
3566 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3567 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3568 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3569
3570 if (Is12Operand(*output))
3571 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003572 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003573 }
3574 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3575 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3576
3577 bool isSupported = false;
3578 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3579 IsBatchToSpaceNdSupported,
3580 data.m_Backends,
3581 isSupported,
3582 inputInfo,
3583 outputInfo,
3584 batchToSpaceNdDesc);
3585 if (!isSupported)
3586 {
3587 return false;
3588 }
3589
3590 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3591 assert(layer != nullptr);
3592 input.Connect(layer->GetInputSlot(0));
3593
3594 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3595}
Mike Kelly0a879362019-07-29 16:56:31 +01003596
Finn Williamsd74c5052019-07-30 17:06:00 +01003597template<typename HalPolicy,
3598 typename HalOperation = typename HalPolicy::Operation,
3599 typename HalOperand = typename HalPolicy::Operand,
3600 typename HalModel = typename HalPolicy::Model>
3601bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3602{
3603 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3604 if (!input.IsValid())
3605 {
3606 return Fail("%s: Operation has invalid inputs", __func__);
3607 }
3608
3609 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3610 unsigned int rank = inputInfo.GetNumDimensions();
3611 unsigned int spatialDim = rank - 2;
3612
3613 if (rank != 4)
3614 {
3615 Fail("%s: Only inputs with rank 4 are supported", __func__);
3616 }
3617
3618 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3619 if (!output)
3620 {
3621 return Fail("%s: Could not read output 0", __func__);
3622 }
3623
3624 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3625 if (IsDynamicTensor(outputInfo))
3626 {
3627 return Fail("%s: Dynamic output tensors are not supported", __func__);
3628 }
3629
3630 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3631 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3632
3633 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3634 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3635 {
3636 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3637 }
3638
3639 std::vector<int32_t> blockShape;
Mike Kelly789cf9a2020-02-18 10:03:30 +00003640 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3641 {
3642 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3643 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003644 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3645 {
3646 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3647 }
3648
3649 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3650 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3651 {
3652 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3653 }
3654
3655 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3656 std::vector<int32_t> paddings;
Mike Kelly789cf9a2020-02-18 10:03:30 +00003657 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3658 {
3659 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3660 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003661 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3662 {
3663 int paddingBeforeInput = paddings[i];
3664 int paddingAfterInput = paddings[i + 1];
3665 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3666 {
3667 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3668 }
3669
3670 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3671 }
3672
3673 armnn::SpaceToBatchNdDescriptor descriptor;
3674 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3675 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3676 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3677
3678 if (Is12Operand(*output))
3679 {
3680 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3681 }
3682
3683 bool isSupported = false;
3684 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3685 IsSpaceToBatchNdSupported,
3686 data.m_Backends,
3687 isSupported,
3688 inputInfo,
3689 outputInfo,
3690 descriptor);
3691 if (!isSupported)
3692 {
3693 return false;
3694 }
3695
3696 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3697 assert(layer != nullptr);
3698 input.Connect(layer->GetInputSlot(0));
3699
3700 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3701}
3702
saoste01b8471482018-10-10 09:44:51 +01003703} // namespace armnn_driver