blob: 550c320177860cb229981a6d57dcc1f950cb4a98 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
15#include <armnnUtils/Permute.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Aggregates the state shared by every step of a single model conversion:
// the candidate backends, the ArmNN network under construction, the output
// slot created for each model operand, and the memory pools that back
// constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends to try, in preference order.
    const std::vector<armnn::BackendId> m_Backends;
    // Network being built; starts out null and is created by the converter.
    armnn::INetworkPtr m_Network;
    // Indexed by operand index: the slot that produces each operand's value.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools referenced by CONSTANT_REFERENCE operands.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Wraps the output slot that produces an operation input, together with the
// tensor info of the value it carries, so callers can connect the input
// without re-querying the model.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when the handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Tensor info of the value produced by the wrapped slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds a constant model operand as an armnn::ConstTensor, optionally owning
// a swizzled (permuted) copy of the data when the layout had to change.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data/shape (identity leaves the data untouched).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is forbidden (the pin may own the swizzled buffer); moving is allowed.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string, passed straight through to ALOGD
//                  together with the forwarded arguments.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates 'backends' in order and sets 'supported' to true for the first one
// whose ILayerSupport object accepts the layer described by the trailing
// arguments. Logs (ALOGD) the backend's reason string when it rejects the
// layer, backends that are not registered, and the case where no backend
// supports the layer. An InvalidArgumentException raised by the support check
// is rethrown with extra context attached.
// NOTE: comments cannot appear inside the macro body - a // comment would
// swallow the line-continuation backslash.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
// Builds an armnn::TensorShape from the operand's dimension list.
template<typename HalOperand>
armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Support within the 1.2 driver for specific tensor data types
// (plus the BOOL scalar type used by some 1.2 operations).
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
198
// A HAL 1.0 operand can never hold a BOOL value.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// Checks if an operand is a 1.2 Operand; the V1_0 overload always says no.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
208
#ifdef ARMNN_ANDROID_NN_V1_2

// True when the HAL 1.2 operand is of BOOL type.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
223
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100224template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000225armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
226 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100227 armnn::TensorInfo reshapeInfo)
228{
229 armnn::ReshapeDescriptor reshapeDescriptor;
230 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
231
232 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
233 BOOST_ASSERT(reshapeLayer != nullptr);
234
235 // Attach the input layer to the reshape layer
236 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
237 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
238
239 return *reshapeLayer;
240}
241
// Connects input0 and input1 to the two input slots of 'startLayer'.
// When the inputs have different ranks, the smaller one is reshaped (with
// leading degenerate dimensions added) so the ranks match before connecting.
// @return false if the required reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // New shape: ones in the leading positions, original dims in the trailing ones
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
334
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000335void CalcPadding(uint32_t input,
336 uint32_t kernel,
337 uint32_t stride,
338 uint32_t& outPadHead,
339 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100340 android::nn::PaddingScheme scheme)
341{
342 int32_t padHead;
343 int32_t padTail;
344 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
345 outPadHead = boost::numeric_cast<uint32_t>(padHead);
346 outPadTail = boost::numeric_cast<uint32_t>(padTail);
347}
348
#ifdef ARMNN_ANDROID_NN_V1_2

// Dilation-aware overload of CalcPadding used by the 1.2 driver.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t headPadding = 0;
    int32_t tailPadding = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &headPadding, &tailPadding);
    outPadHead = boost::numeric_cast<uint32_t>(headPadding);
    outPadTail = boost::numeric_cast<uint32_t>(tailPadding);
}

// Explicit padding for transpose convolution; derived from the *output* size
// and allowed to be negative, hence the signed out-parameters.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
368
Matthew Bentham912b3622019-05-03 15:49:14 +0100369Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100370{
371 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100372 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100373 shape.dimensions = operand.dimensions;
374 shape.scale = operand.scale;
375 shape.offset = operand.zeroPoint;
376 return shape;
377}
378
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 counterpart of GetOperandShape above.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
392
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        // Start from the weight scales and multiply each by the input scale
        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only overwrite the scale when it is within 1% of the expected value;
            // larger mismatches are left for ArmNN itself to reject.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
432
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });   // NHWC -> NCHW
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });   // NCHW -> NHWC
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });  // self-inverse

// 3D Permutation Vectors (RotateTensorLeft and RotateTensorRight are inverses)
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
443
444template<typename OSlot>
445armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
446 const armnn::PermutationVector& mappings)
447{
448 // Add swizzle layer
449 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
450
451 BOOST_ASSERT(layer != nullptr);
452
453 // Connect input to swizzle layer
454 input.Connect(layer->GetInputSlot(0));
455
456 // Setup swizzled output
457 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
458 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
459
460 return *layer;
461}
462
463void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
464{
465 // Add swizzle layer
466 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
467 // Connect swizzled input to layer
468 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
469}
470
471armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
472{
473 // Add deswizzle layer
474 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
475 return deswizzleLayer;
476}
477
// Wraps a chain of layers with an input swizzle and an output deswizzle.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    // Permute the input to ArmNN order before the first layer...
    SwizzleIn(network, input, firstLayer, 0);
    // ...and permute the last layer's result back to NHWC.
    return DeswizzleOut(network, lastLayer, 0);
}
487
// Single-layer convenience overload of SwizzleInDeswizzleOut.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
494
495bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
496 const armnn::TensorShape & outputShape,
497 uint32_t concatDim)
498{
499 // Validate the output shape is correct given the input shapes (which have just been validated)
500 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
501 if (outputShape.GetNumDimensions() != numDimensions)
502 {
503 return Fail("%s: Output shape has wrong number of dimensions", __func__);
504 }
505
506 unsigned int outputSizeAlongConcatenatedDimension = 0;
507 for (unsigned int i = 0; i < inputShapes.size(); i++)
508 {
509 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
510 }
511
512 for (unsigned int i = 0; i < numDimensions; ++i)
513 {
514 if (i == concatDim)
515 {
516 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
517 {
518 return Fail(
519 "%s: Invalid output shape for dimension %d (%d != %d)",
520 __func__,
521 i,
522 outputShape[i],
523 outputSizeAlongConcatenatedDimension);
524 }
525 }
526 else
527 {
528 if (outputShape[i] != inputShapes[0][i])
529 {
530 return Fail("%s: Invalid output shape", __func__);
531 }
532 }
533 }
534
535 return true;
536}
537
// Concatenation is handled with >= 3-D shapes; lower-rank tensors must be
// reshaped first.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
542
arovir01b0717b52018-09-05 17:03:25 +0100543void SwizzleInputs(armnn::INetwork& network,
544 std::vector<LayerInputHandle>& inputs,
545 std::vector<armnn::TensorShape>& inputShapes,
546 const armnn::PermutationVector& mapping)
547{
548 if (!mapping.IsEqual(IdentityPermutation4D))
549 {
550 size_t nInputs = inputs.size();
551 for (size_t i=0; i<nInputs; ++i)
552 {
553 // add swizzle layer
554 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
555 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
556 auto& outputInfo = outputSlot.GetTensorInfo();
557 // replace inputs with the swizzled ones
558 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
559 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
560 }
561 }
562}
563
// Verifies that permuting each input with 'mapping' is supported by some
// backend, and if so performs the swizzle via SwizzleInputs. A 4D identity
// mapping skips both the check and the swizzle.
// @return false as soon as one input's permute is unsupported.
bool CheckReshapeSupported(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping,
                           const armnn::TensorInfo& outputInfo)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::PermuteDescriptor permuteDesc;
            permuteDesc.m_DimMappings = mapping;

            // NOTE(review): the permute's output info is given as the overall
            // 'outputInfo' rather than the permuted info of inputs[i] - looks
            // questionable for multi-input concat; confirm against callers.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsPermuteSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputInfo,
                                       permuteDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
597
598
narpra01f176d5a2018-11-18 20:17:48 +0000599bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
600 int32_t & concatDimension,
601 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100602{
narpra01f176d5a2018-11-18 20:17:48 +0000603 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100604 BOOST_ASSERT(numberOfDimensions >= 3);
605
606 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000607 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
608 // or along dimension 0 or 2 for a 3-D tensor.
609 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100610 {
narpra01f176d5a2018-11-18 20:17:48 +0000611 concatDimension = 1;
612 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
613 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100614 }
narpra01f176d5a2018-11-18 20:17:48 +0000615 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100616 {
narpra01f176d5a2018-11-18 20:17:48 +0000617 concatDimension = 0;
618 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
619 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100620 }
narpra01f176d5a2018-11-18 20:17:48 +0000621 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100622}
623
624} // anonymous namespace
625
626namespace armnn_driver
627{
628
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
639
640} // namespace armnn_driver
641
642///
643/// Utility templates
644///
645
646namespace armnn_driver
647{
648
649using namespace android::nn;
650
// Returns a pointer to the operation's input operand at 'inputIndex', or
// nullptr when the index is out of range (logging a failure unless
// 'failOnIndexOutOfBounds' is false - some callers probe optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match uint32_t and size_t; the previous "%i out of %i"
            // passed unsigned values through a signed int conversion specifier.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
672
// Returns a pointer to the operation's output operand at 'outputIndex', or
// nullptr (after logging) when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match uint32_t and size_t; the previous "%i out of %i"
        // passed unsigned values through a signed int conversion specifier.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
692
// Returns a read-only pointer to the operand's constant data, or nullptr.
// CONSTANT_COPY data lives inline in model.operandValues; CONSTANT_REFERENCE
// data is resolved through the conversion's memory pools. A NO_VALUE operand
// yields nullptr, which is only treated as an error when 'optional' is false.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A missing mandatory value is handled like any other invalid lifetime
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
739
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100740template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100741 typename HalOperation = typename HalPolicy::Operation,
742 typename HalModel = typename HalPolicy::Model,
743 typename HalOperandType = typename HalPolicy::OperandType>
744bool GetOperandType(const HalOperation& operation,
745 uint32_t inputIndex,
746 const HalModel& model,
747 HalOperandType& type)
748{
749 using HalOperand = typename HalPolicy::Operand;
750
751 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
752 if (!operand)
753 {
754 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
755 }
756
757 type = operand->type;
758 return true;
759}
760
// Determines whether an operand's data is fixed at conversion time.
// NOTE: NO_VALUE (an omitted optional operand) deliberately counts as
// constant, since it can never carry runtime data.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
773
// Converts a constant HAL operand into a ConstTensorPin that wraps (and, when
// a non-identity permutation is requested, owns a swizzled copy of) its data.
// @param operand             the operand to convert; must be constant unless 'optional' is set
// @param model, data         used to locate the operand's backing memory
// @param dimensionMappings   permutation applied to the tensor data/shape
// @param overrideTensorShape if non-null, replaces the operand's own shape
// @param optional            a missing value yields an optional (not invalid) pin
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        // Remap the quantization dimension through the same permutation as the data
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
822
823template<typename HalPolicy,
824 typename HalOperation = typename HalPolicy::Operation,
825 typename HalModel = typename HalPolicy::Model>
826ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
827 uint32_t inputIndex,
828 const HalModel& model,
829 const ConversionData& data,
830 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
831 const armnn::TensorShape* overrideTensorShape = nullptr,
832 bool optional = false)
833{
834 using HalOperand = typename HalPolicy::Operand;
835
836 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
837 if (!operand)
838 {
839 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
840 return ConstTensorPin();
841 }
842 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
843 model,
844 data,
845 dimensionMappings,
846 overrideTensorShape,
847 optional);
848}
849
850template<typename HalPolicy,
851 typename OutputType,
852 typename HalOperandType = typename HalPolicy::OperandType,
853 typename HalOperation = typename HalPolicy::Operation,
854 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100855bool GetInputScalar(const HalOperation& operation,
856 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100857 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100858 OutputType& outValue,
859 const HalModel& model,
860 const ConversionData& data)
861{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100862 using HalOperand = typename HalPolicy::Operand;
863
864 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100865 if (!operand)
866 {
867 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
868 }
869
870 if (operand->type != type)
871 {
872 return Fail("%s: unexpected operand type: %s (should be %s)",
873 __func__, toString(operand->type).c_str(), toString(type).c_str());
874 }
875
876 if (operand->location.length != sizeof(OutputType))
877 {
878 return Fail("%s: incorrect operand location length: %i (should be %i)",
879 __func__, operand->location.length, sizeof(OutputType));
880 }
881
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100882 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100883 if (!valueAddress)
884 {
885 return Fail("%s: failed to get address for operand", __func__);
886 }
887
888 outValue = *(static_cast<const OutputType*>(valueAddress));
889 return true;
890}
891
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100892template<typename HalPolicy,
893 typename HalOperation = typename HalPolicy::Operation,
894 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100895bool GetInputInt32(const HalOperation& operation,
896 uint32_t inputIndex,
897 int32_t& outValue,
898 const HalModel& model,
899 const ConversionData& data)
900{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100901 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100902}
903
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100904template<typename HalPolicy,
905 typename HalOperation = typename HalPolicy::Operation,
906 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100907bool GetInputFloat32(const HalOperation& operation,
908 uint32_t inputIndex,
909 float& outValue,
910 const HalModel& model,
911 const ConversionData& data)
912{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100913 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100914}
915
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
// Reads a fused-activation-function code from the input at inputIndex and
// converts it to an ActivationFn. Accepts INT32 or TENSOR_INT32 as the
// declared operand type.
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        // NOTE(review): the message text stringifies the unqualified
        // OperandType (not HalOperandType) — presumably the android::nn type
        // in scope; confirm they stringify identically for these values.
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    // The raw integer maps directly onto the ActivationFn enum values.
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
944
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100945template<typename HalPolicy,
946 typename HalOperation = typename HalPolicy::Operation,
947 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100948bool GetInputActivationFunction(const HalOperation& operation,
949 uint32_t inputIndex,
950 ActivationFn& outActivationFunction,
951 const HalModel& model,
952 const ConversionData& data)
953{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100954 return GetInputActivationFunctionImpl<HalPolicy>(operation,
955 inputIndex,
956 HalPolicy::OperandType::INT32,
957 outActivationFunction,
958 model,
959 data);
arovir01b0717b52018-09-05 17:03:25 +0100960}
961
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Reads a fused activation function when the model supplies it as a tensor.
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the name and the comment above, OperandType::INT32
    // (not TENSOR_INT32) is forwarded here, so the underlying GetInputScalar
    // will require the operand's declared type to be INT32 — confirm whether
    // TENSOR_INT32 was intended for this "from tensor" variant.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
979
980
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981template<typename HalPolicy,
982 typename HalOperation = typename HalPolicy::Operation,
983 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100984bool GetOptionalInputActivation(const HalOperation& operation,
985 uint32_t inputIndex,
986 ActivationFn& activationFunction,
987 const HalModel& model,
988 const ConversionData& data)
989{
990 if (operation.inputs.size() <= inputIndex)
991 {
992 activationFunction = ActivationFn::kActivationNone;
993 }
994 else
995 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100996 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100997 {
998 return Fail("%s: Operation has invalid inputs", __func__);
999 }
1000 }
1001 return true;
1002}
1003
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001004template<typename HalPolicy,
1005 typename ConvolutionDescriptor,
1006 typename HalOperation = typename HalPolicy::Operation,
1007 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001008bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1009 uint32_t dilationXIndex,
1010 ConvolutionDescriptor& descriptor,
1011 const HalModel& model,
1012 const ConversionData& data)
1013{
1014 bool success = true;
1015 if (operation.inputs.size() >= dilationXIndex + 2)
1016 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001017 success &= GetInputScalar<HalPolicy>(operation,
1018 dilationXIndex,
1019 HalPolicy::OperandType::INT32,
1020 descriptor.m_DilationX,
1021 model,
1022 data);
1023 success &= GetInputScalar<HalPolicy>(operation,
1024 dilationXIndex + 1,
1025 HalPolicy::OperandType::INT32,
1026 descriptor.m_DilationY,
1027 model,
1028 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001029 }
1030
1031 return success;
1032}
1033
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001034template<typename HalPolicy,
1035 typename HalOperand = typename HalPolicy::Operand,
1036 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001037bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001038 std::vector<int32_t>& outValues,
1039 const HalModel& model,
1040 const ConversionData& data)
1041{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001042 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001043 {
1044 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1045 }
1046
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001047 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001048 if (!startAddress)
1049 {
1050 return Fail("%s: failed to get operand address", __func__, operand.type);
1051 }
1052
1053 // Check number of bytes is sensible
1054 const uint32_t numBytes = operand.location.length;
1055 if (numBytes % sizeof(int32_t) != 0)
1056 {
1057 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1058 __func__, numBytes, sizeof(int32_t));
1059 }
1060
1061 outValues.resize(numBytes / sizeof(int32_t));
1062 memcpy(outValues.data(), startAddress, numBytes);
1063 return true;
1064}
1065
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001066template<typename HalPolicy,
1067 typename HalOperation = typename HalPolicy::Operation,
1068 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001069bool GetInputPaddingScheme(const HalOperation& operation,
1070 uint32_t inputIndex,
1071 PaddingScheme& outPaddingScheme,
1072 const HalModel& model,
1073 const ConversionData& data)
1074{
1075 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001076 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001077 {
1078 return Fail("%s: failed to get padding scheme input value", __func__);
1079 }
1080
1081 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1082 return true;
1083}
1084
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves the operation input at inputIndex to a LayerInputHandle:
// - model inputs / temporaries / model outputs map to an existing ArmNN
//   output slot tracked in data.m_OutputSlotForOperand;
// - constant operands are materialized as an ArmNN Constant layer.
// Returns an invalid handle (after logging via Fail) on any error, including
// dynamic input tensors, which are not supported.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand can throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                // Once validated, a MODEL_INPUT is handled exactly like a
                // temporary/model-output: via its tracked output slot.
                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1197
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001198template<typename HalPolicy,
1199 typename HalOperation = typename HalPolicy::Operation,
1200 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001201bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1202 uint32_t operationOutputIndex,
1203 armnn::IConnectableLayer& layer,
1204 uint32_t layerOutputIndex,
1205 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001206 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001207{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001208 using HalOperand = typename HalPolicy::Operand;
1209
1210 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001211 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1212 {
1213 return false;
1214 }
1215
1216 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1217
1218 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1219 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1220
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001221 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001222
1223 return true;
1224}
1225
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001226template<typename HalPolicy,
1227 typename HalOperation = typename HalPolicy::Operation,
1228 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001229armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1230 uint32_t inputIndex,
1231 const HalModel& model,
1232 ConversionData& data)
1233{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001234 using HalOperand = typename HalPolicy::Operand;
1235
1236 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001237 if (!operand)
1238 {
1239 return armnn::DataLayout::NHWC;
1240 }
1241
1242 if (!IsBool(*operand))
1243 {
1244 return armnn::DataLayout::NHWC;
1245 }
1246
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001247 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001248 if (!valueAddress)
1249 {
1250 return armnn::DataLayout::NHWC;
1251 }
1252
1253 if (*(static_cast<const bool*>(valueAddress)))
1254 {
1255 return armnn::DataLayout::NCHW;
1256 }
1257 else
1258 {
1259 return armnn::DataLayout::NHWC;
1260 }
1261}
1262
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001263template<typename HalPolicy,
1264 typename HalOperation = typename HalPolicy::Operation,
1265 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001266bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1267 uint32_t outputIndex,
1268 armnn::IConnectableLayer& layer,
1269 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001270 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001271{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001272 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1273 outputIndex,
1274 layer,
1275 outputIndex,
1276 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001277 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001278}
1279
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a one-input/one-output HAL activation operation (ReLu, TanH, ...)
// into an ArmNN Activation layer described by activationDesc.
// Returns false (via Fail) on invalid inputs, dynamic output tensors, or when
// no backend supports the activation.
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask each backend in turn whether it can run this activation.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Register the layer's output slot against the operation's output operand.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1328
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001329template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001330 typename HalOperation = typename HalPolicy::Operation,
1331 typename HalModel = typename HalPolicy::Model>
1332bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1333{
1334 armnn::ActivationDescriptor desc;
1335 desc.m_Function = armnn::ActivationFunction::ReLu;
1336
1337 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1338}
1339
1340template<typename HalPolicy,
1341 typename HalOperation = typename HalPolicy::Operation,
1342 typename HalModel = typename HalPolicy::Model>
1343bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1344{
1345 armnn::ActivationDescriptor desc;
1346 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1347 desc.m_A = 1.0f;
1348 desc.m_B = -1.0f;
1349
1350 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1351}
1352
1353template<typename HalPolicy,
1354 typename HalOperation = typename HalPolicy::Operation,
1355 typename HalModel = typename HalPolicy::Model>
1356bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1357{
1358 armnn::ActivationDescriptor desc;
1359 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1360 desc.m_A = 6.0f;
1361
1362 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1363}
1364
1365template<typename HalPolicy,
1366 typename HalOperation = typename HalPolicy::Operation,
1367 typename HalModel = typename HalPolicy::Model>
1368bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1369{
1370 armnn::ActivationDescriptor desc;
1371 desc.m_Function = armnn::ActivationFunction::TanH;
1372 desc.m_A = 1.0f; // android nn does not support tanH parameters
1373 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1374
1375 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1376}
1377
1378template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001379 typename HalOperation = typename HalPolicy::Operation,
1380 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001381bool ConvertPaddings(const HalOperation& operation,
1382 const HalModel& model,
1383 ConversionData& data,
1384 unsigned int rank,
1385 armnn::PadDescriptor& padDescriptor)
1386{
1387 using HalOperand = typename HalPolicy::Operand;
1388
1389 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1390 if (!paddingsOperand)
1391 {
1392 return Fail("%s: Could not read paddings operand", __func__);
1393 }
1394
1395 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1396 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1397 {
1398 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1399 }
1400
1401 std::vector<int32_t> paddings;
1402 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1403
1404 // add padding for each dimension of input tensor.
1405 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1406 {
1407 int paddingBeforeInput = paddings[i];
1408 int paddingAfterInput = paddings[i + 1];
1409
1410 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1411 {
1412 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1413 }
1414
1415 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1416 }
1417
1418 return true;
1419}
1420
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL 2D pooling operation (average/L2/max, selected by poolType)
// into an ArmNN Pooling2d layer, handling both the explicit-padding form
// (>= 10 inputs) and the implicit-padding-scheme form (7 inputs or fewer),
// plus the optional trailing data-layout flag on 1.2 operands.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Optional data-layout flag at index 10 (1.2 operands only).
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Optional data-layout flag at index 7 (1.2 operands only).
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Implicit padding: derive the explicit pad amounts from the padding
        // scheme and the input spatial dimensions. NOTE: the layout must be
        // resolved first so the width/height indices are read correctly.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Ask each backend in turn whether it can run this pooling configuration.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1539
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001540template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001541 typename HalOperation = typename HalPolicy::Operation,
1542 typename HalModel = typename HalPolicy::Model>
1543bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001544{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001545 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001546
1547 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1548 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1549
1550 if (!input0.IsValid() || !input1.IsValid())
1551 {
1552 return Fail("%s: Operation has invalid inputs", __func__);
1553 }
1554
1555 // The FuseActivation parameter is always the input index 2
1556 // and it should be optional
1557 ActivationFn activationFunction;
1558 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1559 {
1560 return Fail("%s: Operation has invalid inputs", __func__);
1561 }
1562
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001563 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001564 if (!outputOperand)
1565 {
1566 return false;
1567 }
1568
1569 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1570 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1571
1572 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1573 if (IsDynamicTensor(outputInfo))
1574 {
1575 return Fail("%s: Dynamic output tensors are not supported", __func__);
1576 }
1577
1578 bool isSupported = false;
1579 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1580 IsAdditionSupported,
1581 data.m_Backends,
1582 isSupported,
1583 inputInfo0,
1584 inputInfo1,
1585 outputInfo);
1586 if (!isSupported)
1587 {
1588 return false;
1589 }
1590
1591 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1592 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1593
1594 if (endLayer != nullptr)
1595 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001596 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001597 if (!isReshapeSupported)
1598 {
1599 return false;
1600 }
1601
Mike Kelly46272802019-08-14 17:00:48 +01001602 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1603 }
1604 else
1605 {
1606 return Fail("%s: ProcessActivation failed", __func__);
1607 }
1608}
1609
1610template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001611 typename HalOperation = typename HalPolicy::Operation,
1612 typename HalModel = typename HalPolicy::Model>
1613bool ConvertArgMinMax(const HalOperation& operation,
1614 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001615 ConversionData& data,
1616 armnn::ArgMinMaxFunction argMinMaxFunction)
1617{
1618 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1619
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001620 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001621 using HalOperandType = typename HalPolicy::OperandType;
1622
1623 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1624
1625 if (!input0.IsValid())
1626 {
1627 return Fail("%s: Operation has invalid inputs", __func__);
1628 }
1629
1630 int32_t axis;
1631 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1632 {
1633 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1634 }
1635
1636 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1637 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1638
1639 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1640 {
1641 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1642 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1643 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1644 return Fail("%s: Axis must be in range [-n, n)", __func__);
1645 }
1646
1647 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1648 if (!output)
1649 {
1650 return Fail("%s: Could not read output 0", __func__);
1651 }
1652
1653 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1654
1655 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1656 if (IsDynamicTensor(outputInfo))
1657 {
1658 return Fail("%s: Dynamic output tensors are not supported", __func__);
1659 }
1660
1661 armnn::ArgMinMaxDescriptor descriptor;
1662 descriptor.m_Function = argMinMaxFunction;
1663 descriptor.m_Axis = axis;
1664
1665 bool isSupported = false;
1666 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1667 IsArgMinMaxSupported,
1668 data.m_Backends,
1669 isSupported,
1670 inputInfo0,
1671 outputInfo,
1672 descriptor);
1673 if (!isSupported)
1674 {
1675 return false;
1676 }
1677
1678 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1679 assert(layer != nullptr);
1680
1681 input0.Connect(layer->GetInputSlot(0));
1682
1683 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1684}
1685
// Converts an NNAPI CONCATENATION operation into an ArmNN Concat layer.
// Inputs 0..N-1 are the tensors to join; input N is the INT32 concat axis.
// Low-rank (1-D/2-D) inputs are expanded to 3-D with leading 1-dims before
// concatenation, and some axes require a permute around the concat layer;
// both transformations are undone on the output so callers see the HAL shape.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // outputShape is mutated below if inputs are reshaped or permuted.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was padded up to 3-D so the inverse reshape can
    // be applied to the concat output below.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // Verify the backend supports the expanding reshape before adding it.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
            CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
                armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather pointers to each input's TensorInfo for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        armnn::PermuteDescriptor permuteDesc;
        permuteDesc.m_DimMappings = permutationPair.second;

        // Verify the backend supports the inverse permute before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPermuteSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   outputInfo,
                                   permuteDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // Verify the backend supports the collapsing reshape before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1973
// Converts an NNAPI CONV_2D operation into an ArmNN Convolution2d layer.
// Supports both HAL signatures: 10 inputs (explicit padding) and 7 inputs
// (implicit padding scheme). Data layout is NHWC; weights and bias must be
// constant operands.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding signature: pads, strides and activation are scalars.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding signature: padding is derived from the scheme below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Indices 1/2 pick H/W from the weights and input tensors
        // (NHWC-style layout — dim 0 is batch/output-channels, dim 3 is channels).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check backend support before adding the layer to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when there is none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2093
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002094template<typename HalPolicy,
2095 typename HalOperation = typename HalPolicy::Operation,
2096 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002097bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2098{
2099 using HalOperand = typename HalPolicy::Operand;
2100 using HalOperandType = typename HalPolicy::OperandType;
2101
2102 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2103 if (!input.IsValid() )
2104 {
2105 return Fail("%s: Operation has invalid inputs", __func__);
2106 }
2107
2108 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2109 unsigned int rank = inputInfo.GetNumDimensions();
2110 if (rank != 4)
2111 {
2112 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2113 }
2114
2115 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2116 if (!output)
2117 {
2118 return Fail("%s: Could not read output 0", __func__);
2119 }
2120
2121 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2122 if (IsDynamicTensor(outputInfo))
2123 {
2124 return Fail("%s: Dynamic output tensors are not supported", __func__);
2125 }
2126
2127 armnn::DepthToSpaceDescriptor descriptor;
2128
2129 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2130 if (descriptor.m_BlockSize <= 1)
2131 {
2132 return Fail("%s: Block size must be at least 1 in all dimensions");
2133 }
2134
2135 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2136 if (Is12Operand(*output))
2137 {
2138 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2139 }
2140
2141 bool isSupported = false;
2142 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2143 IsDepthToSpaceSupported,
2144 data.m_Backends,
2145 isSupported,
2146 inputInfo,
2147 outputInfo,
2148 descriptor);
2149 if (!isSupported)
2150 {
2151 return false;
2152 }
2153
2154 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2155 assert(layer != nullptr);
2156 input.Connect(layer->GetInputSlot(0));
2157
2158 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2159}
2160
// Converts an NNAPI DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. Supports both HAL signatures: 11 inputs
// (explicit padding) and 8 inputs (implicit padding scheme). The HAL weight
// tensor [ 1, H, W, I * M ] is reinterpreted and swizzled into ArmNN's
// expected [ M, I, H, W ] layout.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I = input channels from the NHWC input, M = depth multiplier).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding signature; note index 9 (depth multiplier) is not
        // read here — it is already folded into the weights shape above.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding signature: padding is derived from the scheme below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are [ M, I, H, W ] after the swizzle above, so dims 3/2 are W/H;
        // input is NHWC, so dims 2/1 are W/H.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check backend support before adding the layer to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when there is none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2305
Mike Kelly3c673942019-07-25 09:26:06 +01002306template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002307 typename HalOperation = typename HalPolicy::Operation,
2308 typename HalModel = typename HalPolicy::Model>
2309bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002310{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002311 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002312
2313 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2314 if (!input.IsValid())
2315 {
2316 return Fail("%s: Operation has invalid input", __func__);
2317 }
2318
Sadik Armagan98c0f662019-11-21 15:54:36 +00002319 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2320 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2321 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2322 {
2323 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2324 }
2325
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002326 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002327 if (!outputOperand)
2328 {
2329 return Fail("%s: Operation has invalid outputs", __func__);
2330 }
2331
2332 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2333 if (IsDynamicTensor(outputInfo))
2334 {
2335 return Fail("%s: Dynamic output tensors are not supported", __func__);
2336 }
2337
2338 bool isSupported = false;
2339 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2340 IsDequantizeSupported,
2341 data.m_Backends,
2342 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002343 inputInfo,
2344 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002345 if (!isSupported)
2346 {
2347 return false;
2348 }
2349
2350 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2351 assert(layer != nullptr);
2352 input.Connect(layer->GetInputSlot(0));
2353
2354 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2355}
2356
2357template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002358 typename HalOperation = typename HalPolicy::Operation,
2359 typename HalModel = typename HalPolicy::Model>
2360bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002361{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002362 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002363
2364 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2365 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2366
2367 if (!input0.IsValid() || !input1.IsValid())
2368 {
2369 return Fail("%s: Operation has invalid inputs", __func__);
2370 }
2371
2372 // The FuseActivation parameter is always the input index 2
2373 // and it should be optional
2374 ActivationFn activationFunction;
2375 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2376 {
2377 return Fail("%s: Operation has invalid inputs", __func__);
2378 }
2379
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002380 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002381 if (!output)
2382 {
2383 return Fail("%s: Could not read output 0", __func__);
2384 }
2385
2386 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2387 if (IsDynamicTensor(outputInfo))
2388 {
2389 return Fail("%s: Dynamic output tensors are not supported", __func__);
2390 }
2391
2392 bool isSupported = false;
2393 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2394 IsDivisionSupported,
2395 data.m_Backends,
2396 isSupported,
2397 input0.GetTensorInfo(),
2398 input1.GetTensorInfo(),
2399 outputInfo);
2400 if (!isSupported)
2401 {
2402 return false;
2403 }
2404
2405 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2406 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2407
2408 if (endLayer)
2409 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002410 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002411 if (!isReshapeSupported)
2412 {
2413 return false;
2414 }
2415
Mike Kelly46272802019-08-14 17:00:48 +01002416 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2417 }
2418 return Fail("%s: ProcessActivation failed", __func__);
2419}
2420
2421template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002422 typename HalOperation = typename HalPolicy::Operation,
2423 typename HalModel = typename HalPolicy::Model>
2424bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002425{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002426 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002427
2428 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2429 if (!input.IsValid())
2430 {
2431 return Fail("%s: Operation has invalid inputs", __func__);
2432 }
2433
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002434 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002435 if (!outputOperand)
2436 {
2437 return Fail("%s: Operation has invalid outputs", __func__);
2438 }
2439
2440 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2441 if (IsDynamicTensor(outputInfo))
2442 {
2443 return Fail("%s: Dynamic output tensors are not supported", __func__);
2444 }
2445
2446 bool isSupported = false;
2447 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2448 IsFloorSupported,
2449 data.m_Backends,
2450 isSupported,
2451 input.GetTensorInfo(),
2452 outputInfo);
2453 if (!isSupported)
2454 {
2455 return false;
2456 }
2457
2458 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2459 assert(layer != nullptr);
2460 input.Connect(layer->GetInputSlot(0));
2461
2462 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2463}
2464
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a 1.0 operand can never be QSymm8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2469
2470#ifdef ARMNN_ANDROID_NN_V1_2
2471
// Returns true if the given HAL 1.2 operand holds 8-bit symmetrically quantized data.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}
2476
2477#endif
2478
// Outcome of DequantizeIfRequired(); carried as the fourth element of a DequantizeResult.
enum class DequantizeStatus
{
    SUCCESS,         // A QSYMM8 DEQUANTIZE producer was found and its data was dequantized.
    NOT_REQUIRED,    // Weights are already constant, or no suitable DEQUANTIZE producer was found.
    INVALID_OPERAND  // The weights operand could not be read from the model.
};

// <dequantized float buffer, buffer size in bytes, tensor info describing the buffer, status>
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2487
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002488template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002489 typename HalOperation = typename HalPolicy::Operation,
2490 typename HalModel = typename HalPolicy::Model>
2491DequantizeResult DequantizeIfRequired(size_t operand_index,
2492 const HalOperation& operation,
2493 const HalModel& model,
2494 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002495{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002496 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002497
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002498 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002499 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002500 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002501 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002502 }
2503
2504 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2505 {
2506 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002507 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002508 }
2509
2510 const size_t weightsInputIndex = operation.inputs[operand_index];
2511
2512 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2513 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2514 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2515 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002516 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002517 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002518 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2519 {
2520 continue;
2521 }
2522
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002523 size_t outOpIndex = weightsInputIndex + 1;
2524 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002525 {
2526 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002527 }
2528
2529 if (outOpIndex != weightsInputIndex)
2530 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002531 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002532 }
2533
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002534 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002535 BOOST_ASSERT(operand);
2536
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002537 if (!IsQSymm8(*operand))
2538 {
2539 // Only supporting dequantize from QSYMM8 to FLOAT
2540 break;
2541 }
2542
2543 // Allocate a new buffer for the dequantized data and manually dequantize
2544 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2545 if (!startValue)
2546 {
2547 // Failed to get the operand address
2548 break;
2549 }
2550
2551 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2552 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002553 const float quantizationScale = operand->scale;
2554
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002555 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2556 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2557 {
2558 float* dstPtr = dequantizedBuffer.get();
2559 BOOST_ASSERT(dstPtr);
2560 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2561 }
2562
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002563 // Construct tensor info for dequantized ConstTensor
2564 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2565 operand->dimensions.data(),
2566 armnn::DataType::Float32);
2567
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002568 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2569 std::move(tensorInfo),
2570 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002571 }
2572
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002573 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002574}
2575
2576template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002577 typename HalOperation = typename HalPolicy::Operation,
2578 typename HalModel = typename HalPolicy::Model>
2579ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2580 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002581 const ConversionData& data,
2582 size_t operandIndex,
2583 bool optional = false)
2584{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002585 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2586
2587 DequantizeStatus status = std::get<3>(dequantized);
2588 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002589 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002590 case DequantizeStatus::INVALID_OPERAND:
2591 {
2592 // return invalid const tensor pin
2593 return ConstTensorPin();
2594 }
2595 case DequantizeStatus::NOT_REQUIRED:
2596 {
2597 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2598 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2599 }
2600 case DequantizeStatus::SUCCESS:
2601 default:
2602 {
2603 return ConstTensorPin(
2604 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2605 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002606 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002607}
2608
2609
// Converts the Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = input tensor, 1 = weights (possibly produced by a DEQUANTIZE op),
// 2 = 1D bias, 3 = fused activation function.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // The weights input may be the output of a DEQUANTIZE op; dequantize it here if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FullyConnected needs a flattened (2D) input shape; may throw on incompatible shapes.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    // Check that the flattened input, weights and output shapes are mutually consistent.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Inputs with rank > 2 are flattened by an explicit Reshape layer inserted
            // between the input and the FullyConnected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2728
2729template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002730 typename HalOperation = typename HalPolicy::Operation,
2731 typename HalModel = typename HalPolicy::Model>
2732bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002733{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002734 using HalOperand = typename HalPolicy::Operand;
2735
Mike Kelly999e2092019-08-15 10:46:46 +01002736 if (operation.inputs.size() != 1)
2737 {
2738 return Fail("%s: Optional inputs are not supported", __func__);
2739 }
2740
Mike Kelly46272802019-08-14 17:00:48 +01002741 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2742 if (!input.IsValid())
2743 {
2744 return Fail("%s: Operation has invalid inputs", __func__);
2745 }
2746
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002747 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002748 if (!output)
2749 {
2750 return Fail("%s: Could not read output 0", __func__);
2751 }
2752
2753 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2754 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2755
2756 if (IsDynamicTensor(outputInfo))
2757 {
2758 return Fail("%s: Dynamic output tensors are not supported", __func__);
2759 }
2760 if (outputInfo.GetNumDimensions() != 4u)
2761 {
2762 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2763 }
2764
2765 armnn::L2NormalizationDescriptor desc;
2766 desc.m_DataLayout = armnn::DataLayout::NHWC;
2767
2768 bool isSupported = false;
2769 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2770 IsL2NormalizationSupported,
2771 data.m_Backends,
2772 isSupported,
2773 inputInfo,
2774 outputInfo,
2775 desc);
2776 if (!isSupported)
2777 {
2778 return false;
2779 }
2780
2781 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2782 assert(layer != nullptr);
2783 input.Connect(layer->GetInputSlot(0));
2784
2785 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2786}
2787
2788template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002789 typename HalOperation = typename HalPolicy::Operation,
2790 typename HalModel = typename HalPolicy::Model>
2791bool ConvertLocalResponseNormalization(const HalOperation& operation,
2792 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01002793 ConversionData& data)
2794{
Mike Kelly999e2092019-08-15 10:46:46 +01002795 if (operation.inputs.size() != 5)
2796 {
2797 return Fail("%s: Optional inputs are not supported", __func__);
2798 }
2799
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002800 using HalOperand = typename HalPolicy::Operand;
2801 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01002802
2803 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2804 if (!input.IsValid())
2805 {
2806 return Fail("%s: Operation has invalid inputs", __func__);
2807 }
2808
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002809 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002810 if (!output)
2811 {
2812 return Fail("%s: Could not read output 0", __func__);
2813 }
2814
2815 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2816 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2817
2818 if (IsDynamicTensor(outputInfo))
2819 {
2820 return Fail("%s: Dynamic output tensors are not supported", __func__);
2821 }
2822 if (outputInfo.GetNumDimensions() != 4u)
2823 {
2824 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2825 }
2826
2827 armnn::NormalizationDescriptor descriptor;
2828 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2829 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2830 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2831
2832 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002833 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01002834 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2835 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2836 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2837 {
2838 return Fail("%s: Operation has invalid inputs", __func__);
2839 }
2840
2841 // ArmNN expects normSize to be the full size of the normalization
2842 // window rather than the radius as in AndroidNN.
2843 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2844
2845 bool isSupported = false;
2846 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2847 IsNormalizationSupported,
2848 data.m_Backends,
2849 isSupported,
2850 inputInfo,
2851 outputInfo,
2852 descriptor);
2853 if (!isSupported)
2854 {
2855 return false;
2856 }
2857
2858
2859 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2860 assert(layer != nullptr);
2861 input.Connect(layer->GetInputSlot(0));
2862
2863 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2864}
2865
2866template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002867 typename HalOperation = typename HalPolicy::Operation,
2868 typename HalModel = typename HalPolicy::Model>
2869bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002870{
Mike Kelly46272802019-08-14 17:00:48 +01002871 armnn::ActivationDescriptor desc;
2872 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2873
2874 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2875}
2876
// Converts the Android NN MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = input tensor, 1 = axis tensor (INT32 values), 2 = keep-dims flag (INT32).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axis values into [0, rank); for non-negative values
    // the unsigned modular arithmetic leaves them unchanged.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2953
2954template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002955 typename HalOperation = typename HalPolicy::Operation,
2956 typename HalModel = typename HalPolicy::Model>
2957bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002958{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002959 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002960
2961 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2962 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2963
2964 if (!input0.IsValid() || !input1.IsValid())
2965 {
2966 return Fail("%s: Operation has invalid inputs", __func__);
2967 }
2968
2969 // The FuseActivation parameter is always the input index 2
2970 // and it should be optional
2971 ActivationFn activationFunction;
2972 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2973 {
2974 return Fail("%s: Operation has invalid inputs", __func__);
2975 }
2976
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002977 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002978
2979 if (outputOperand == nullptr)
2980 {
2981 return false;
2982 }
2983
2984 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2985 if (IsDynamicTensor(outputInfo))
2986 {
2987 return Fail("%s: Dynamic output tensors are not supported", __func__);
2988 }
2989
2990 bool isSupported = false;
2991 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2992 IsMultiplicationSupported,
2993 data.m_Backends,
2994 isSupported,
2995 input0.GetTensorInfo(),
2996 input1.GetTensorInfo(),
2997 outputInfo);
2998 if (!isSupported)
2999 {
3000 return false;
3001 }
3002
3003 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3004 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3005
3006 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3007 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3008
3009 if (endLayer != nullptr)
3010 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003011 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003012 if (!isReshapeSupported)
3013 {
3014 return false;
3015 }
3016
Mike Kelly46272802019-08-14 17:00:48 +01003017 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3018 }
3019 else
3020 {
3021 return Fail("%s: ProcessActivation failed", __func__);
3022 }
3023}
3024
3025template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003026 typename HalOperation = typename HalPolicy::Operation,
3027 typename HalModel = typename HalPolicy::Model>
3028bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003029{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003030 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003031
Mike Kelly3c673942019-07-25 09:26:06 +01003032 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3033 if (!input.IsValid())
3034 {
3035 return Fail("%s: Operation has invalid inputs", __func__);
3036 }
3037
3038 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3039 unsigned int rank = inputInfo.GetNumDimensions();
3040
3041 armnn::PadDescriptor descriptor;
3042 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3043 {
3044 return Fail("%s: Could not convert paddings", __func__);
3045 }
3046
3047 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3048 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3049 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Derek Lamberti1a38cda2020-01-10 17:28:20 +00003050 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
Mike Kelly3c673942019-07-25 09:26:06 +01003051 {
3052 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3053 }
3054
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003055 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003056 if (!output)
3057 {
3058 return Fail("%s: Could not read output", __func__);
3059 }
3060
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003061 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003062 if (IsDynamicTensor(outputInfo))
3063 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003064 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003065 }
3066
3067 bool isSupported = false;
3068 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3069 IsPadSupported,
3070 data.m_Backends,
3071 isSupported,
3072 inputInfo,
3073 outputInfo,
3074 descriptor);
3075 if (!isSupported)
3076 {
3077 return false;
3078 }
3079
3080 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3081 assert(layer != nullptr);
3082 input.Connect(layer->GetInputSlot(0));
3083 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3084
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003085 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003086}
3087
Mike Kelly0a879362019-07-29 16:56:31 +01003088template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003089 typename HalOperation = typename HalPolicy::Operation,
3090 typename HalModel = typename HalPolicy::Model>
3091bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003092{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003093 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003094
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003095 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3096 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3097 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003098
3099 if (inputOperand == nullptr
3100 || requestedShapeOperand == nullptr
3101 || outputOperand == nullptr)
3102 {
3103 return Fail("%s: Operation has invalid inputs", __func__);
3104 }
3105
3106 if (requestedShapeOperand->dimensions.size() != 1)
3107 {
3108 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3109 __func__, requestedShapeOperand->dimensions.size());
3110 }
3111
3112 std::vector<int32_t> targetDimensions;
3113 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3114 {
3115 return Fail("%s: Could not read values of input 1", __func__);
3116 }
3117
3118 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3119
3120 Shape requestedShape;
3121 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3122 // function that resolves these values into a fully specified tensor shape.
3123 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3124 {
3125 return Fail("%s: Failed to resolve the requested shape", __func__);
3126 }
3127
3128 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3129 if (!SameShape(requestedShape, outputOperandShape))
3130 {
3131 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3132 }
3133
3134 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3135 if (!input.IsValid())
3136 {
3137 return Fail("%s: Could not read input 0", __func__);
3138 }
3139
3140 armnn::ReshapeDescriptor reshapeDescriptor;
3141 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3142 requestedShape.dimensions.data());
3143
3144 bool isSupported = false;
3145 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3146 IsReshapeSupported,
3147 data.m_Backends,
3148 isSupported,
3149 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003150 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003151 reshapeDescriptor);
3152 if (!isSupported)
3153 {
3154 return false;
3155 }
3156
3157 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3158 assert(layer != nullptr);
3159 input.Connect(layer->GetInputSlot(0));
3160
3161 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3162}
3163
3164template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003165 typename HalOperation = typename HalPolicy::Operation,
3166 typename HalModel = typename HalPolicy::Model>
3167bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003168{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003169 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003170
Mike Kelly0a879362019-07-29 16:56:31 +01003171 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3172 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3173
3174 if (!input0.IsValid() || !input1.IsValid())
3175 {
3176 return Fail("%s: Operation has invalid inputs", __func__);
3177 }
3178
3179 // The FuseActivation parameter is always the input index 2
3180 // and it should be optional
3181 ActivationFn activationFunction;
3182 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3183 {
3184 return Fail("%s: Operation has invalid inputs", __func__);
3185 }
3186
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003187 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003188 if (!output)
3189 {
3190 return Fail("%s: Could not read output 0", __func__);
3191 }
3192
3193 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3194 if (IsDynamicTensor(outputInfo))
3195 {
3196 return Fail("%s: Dynamic output tensors are not supported", __func__);
3197 }
3198
3199 bool isSupported = false;
3200 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3201 IsSubtractionSupported,
3202 data.m_Backends,
3203 isSupported,
3204 input0.GetTensorInfo(),
3205 input1.GetTensorInfo(),
3206 outputInfo);
3207 if (!isSupported)
3208 {
3209 return false;
3210 }
3211
3212 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3213 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3214
3215 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3216 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3217
3218 if (endLayer)
3219 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003220 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003221 if (!isReshapeSupported)
3222 {
3223 return false;
3224 }
Mike Kelly0a879362019-07-29 16:56:31 +01003225 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3226 }
3227
3228 return Fail("%s: ProcessActivation failed", __func__);
3229}
3230
Finn Williams23b87b32019-07-30 11:44:05 +01003231template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003232 typename HalOperation = typename HalPolicy::Operation,
3233 typename HalModel = typename HalPolicy::Model>
3234bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003235{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003236 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003237
3238 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3239 if (!input.IsValid())
3240 {
3241 return Fail("%s: Operation has invalid inputs", __func__);
3242 }
3243
3244 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3245 unsigned int rank = inputInfo.GetNumDimensions();
3246 if (rank > 4)
3247 {
3248 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3249 }
3250
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003251 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003252 if (!output)
3253 {
3254 return Fail("%s: Could not read output 0", __func__);
3255 }
3256
3257 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3258 {
3259 return Fail("%s: Dynamic output tensors are not supported", __func__);
3260 }
3261
3262 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3263 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003264 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003265
3266 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3267
3268 std::vector<int32_t> axis;
3269 if (!axisOperand)
3270 {
3271 axis.assign(dimensionSequence,
3272 dimensionSequence + rank);
3273 }
3274 else
3275 {
3276 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3277 }
3278
3279 std::vector<uint32_t> outputDims;
3280 for (unsigned int i = 0; i < rank; i++)
3281 {
3282 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3283 auto currentDimension = inputInfo.GetShape()[i];
3284 if (skipSqueeze || currentDimension != 1)
3285 {
3286 outputDims.push_back(currentDimension);
3287 }
3288 }
3289
3290 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3291
3292 armnn::TensorInfo outputInfo = inputInfo;
3293 outputInfo.SetShape(outShape);
3294
3295 armnn::ReshapeDescriptor reshapeDesc;
3296 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3297
3298 bool isSupported = false;
3299 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3300 IsReshapeSupported,
3301 data.m_Backends,
3302 isSupported,
3303 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003304 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003305 reshapeDesc);
3306 if (!isSupported)
3307 {
3308 return false;
3309 }
3310
3311 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3312 assert(layer != nullptr);
3313 input.Connect(layer->GetInputSlot(0));
3314
3315 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3316}
3317
3318template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003319 typename HalOperation = typename HalPolicy::Operation,
3320 typename HalModel = typename HalPolicy::Model>
3321bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003322{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003323 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003324
3325 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3326 if (!input.IsValid())
3327 {
3328 return Fail("%s: Operation has invalid inputs", __func__);
3329 }
3330
3331 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3332 unsigned int rank = inputInfo.GetNumDimensions();
3333 if (rank > 4)
3334 {
3335 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3336 }
3337
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003338 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003339 if (!output)
3340 {
3341 return Fail("%s: Could not read output 0", __func__);
3342 }
3343
3344 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3345 if (IsDynamicTensor(outputInfo))
3346 {
3347 return Fail("%s: Dynamic output tensors are not supported", __func__);
3348 }
3349
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003350 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3351 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3352 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003353
3354 std::vector<int32_t> beginValues;
3355 std::vector<int32_t> endValues;
3356 std::vector<int32_t> stridesValues;
3357
3358 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003359 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003360 {
3361 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3362 {
3363 return false;
3364 }
3365
3366 if (operandValues.size() != rank)
3367 {
3368 return false;
3369 }
3370
3371 return true;
3372 };
3373
3374 if (!ValidateInputOperands(*beginOperand, beginValues)
3375 || !ValidateInputOperands(*endOperand, endValues)
3376 || !ValidateInputOperands(*stridesOperand, stridesValues))
3377 {
3378 return Fail("%s: Operation has invalid input operand", __func__);
3379 }
3380
3381 // Stride cannot have value '0'
3382 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3383 {
3384 return Fail("%s: Stride must be non-zero value.", __func__);
3385 }
3386
3387 armnn::StridedSliceDescriptor descriptor;
3388 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3389 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3390 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3391 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3392
3393 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3394 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3395 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3396 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3397 {
3398 return Fail("%s: Operation has invalid inputs", __func__);
3399 }
3400
3401 bool isSupported = false;
3402 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3403 IsStridedSliceSupported,
3404 data.m_Backends,
3405 isSupported,
3406 inputInfo,
3407 outputInfo,
3408 descriptor);
3409 if (!isSupported)
3410 {
3411 return false;
3412 }
3413
3414 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3415 assert(layer != nullptr);
3416 input.Connect(layer->GetInputSlot(0));
3417
3418 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3419}
3420
3421template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003422 typename HalOperation = typename HalPolicy::Operation,
3423 typename HalModel = typename HalPolicy::Model>
3424bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003425{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003426 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003427
3428 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3429 if (!input.IsValid())
3430 {
3431 return Fail("%s: Operation has invalid inputs", __func__);
3432 }
3433
3434 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3435 unsigned int rank = inputInfo.GetNumDimensions();
3436 if (rank > 4)
3437 {
3438 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3439 }
3440
3441 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3442 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003443 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003444
3445 std::vector<int32_t> perm(rank);
3446 if (!permOperand)
3447 {
3448 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3449 for (unsigned int i = rank; i > 0; i--)
3450 {
3451 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3452 }
3453 }
3454 else
3455 {
3456 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3457 }
3458
3459 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3460
James Conroy1bde8e32020-01-22 16:40:57 +00003461 // Permutation vectors (outputDims) are given in ANN/Tf format, we must convert them to ArmNN format
3462 // For ANN/Tf/ACL: output[i] = input[ perm[i] ]
3463 // For ArmNN: output[ perm[i] ] = input[i]
3464 // e.g. 3,0,1,2 -> 1,2,3,0
3465 std::vector<unsigned int> armnnPermuteShape(rank);
3466 std::vector<unsigned int>::iterator it;
3467 for (unsigned int i = 0u; i < rank; ++i)
3468 {
3469 it = std::find(outputDims.begin(), outputDims.end(), i);
3470 armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(outputDims.begin(), it));
3471 }
3472
Mike Kelly46272802019-08-14 17:00:48 +01003473 armnn::PermuteDescriptor permuteDesc;
James Conroy1bde8e32020-01-22 16:40:57 +00003474 permuteDesc.m_DimMappings = armnn::PermutationVector(armnnPermuteShape.data(), armnnPermuteShape.size());
Mike Kelly46272802019-08-14 17:00:48 +01003475
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003476 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003477 if (!output)
3478 {
3479 return Fail("%s: Could not read output 0", __func__);
3480 }
3481
3482 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003483 if (IsDynamicTensor(outputInfo))
3484 {
3485 return Fail("%s: Dynamic output tensors are not supported", __func__);
3486 }
3487
Mike Kelly46272802019-08-14 17:00:48 +01003488
3489 bool isSupported = false;
3490 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3491 IsPermuteSupported,
3492 data.m_Backends,
3493 isSupported,
3494 inputInfo,
3495 outputInfo,
3496 permuteDesc);
3497 if (!isSupported)
3498 {
3499 return false;
3500 }
3501
3502 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3503 assert(layer != nullptr);
3504 input.Connect(layer->GetInputSlot(0));
3505
3506 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3507}
3508
3509template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003510 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003511 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003512 typename HalModel = typename HalPolicy::Model>
3513bool ConvertBatchToSpaceNd(const HalOperation& operation,
3514 const HalModel& model,
3515 ConversionData& data)
3516{
Finn Williams23b87b32019-07-30 11:44:05 +01003517
3518 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3519 if (!input.IsValid())
3520 {
3521 return Fail("%s: Operation has invalid inputs", __func__);
3522 }
3523
3524 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3525 if (!output)
3526 {
3527 return Fail("%s: Could not read output 0", __func__);
3528 }
3529
3530 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3531 if (IsDynamicTensor(outputInfo))
3532 {
3533 return Fail("%s: Dynamic output tensors are not supported", __func__);
3534 }
3535
3536 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3537 if (!blockOperand)
3538 {
3539 return Fail("%s: Could not read input 1", __func__);
3540 }
3541
3542 // Convert the block operand to int32
3543 std::vector<int32_t> block;
3544 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3545 {
3546 return Fail("%s: Input 1 has invalid values", __func__);
3547 }
3548
3549 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3550
3551 unsigned int rank = inputInfo.GetNumDimensions();
3552 if (rank != 4)
3553 {
3554 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3555 }
3556
3557 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3558 {
3559 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3560 " greater than or equal to 1", __func__);
3561 }
3562
3563 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3564 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3565 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3566
3567 if (Is12Operand(*output))
3568 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003569 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003570 }
3571 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3572 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3573
3574 bool isSupported = false;
3575 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3576 IsBatchToSpaceNdSupported,
3577 data.m_Backends,
3578 isSupported,
3579 inputInfo,
3580 outputInfo,
3581 batchToSpaceNdDesc);
3582 if (!isSupported)
3583 {
3584 return false;
3585 }
3586
3587 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3588 assert(layer != nullptr);
3589 input.Connect(layer->GetInputSlot(0));
3590
3591 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3592}
Mike Kelly0a879362019-07-29 16:56:31 +01003593
Finn Williamsd74c5052019-07-30 17:06:00 +01003594template<typename HalPolicy,
3595 typename HalOperation = typename HalPolicy::Operation,
3596 typename HalOperand = typename HalPolicy::Operand,
3597 typename HalModel = typename HalPolicy::Model>
3598bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3599{
3600 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3601 if (!input.IsValid())
3602 {
3603 return Fail("%s: Operation has invalid inputs", __func__);
3604 }
3605
3606 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3607 unsigned int rank = inputInfo.GetNumDimensions();
3608 unsigned int spatialDim = rank - 2;
3609
3610 if (rank != 4)
3611 {
3612 Fail("%s: Only inputs with rank 4 are supported", __func__);
3613 }
3614
3615 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3616 if (!output)
3617 {
3618 return Fail("%s: Could not read output 0", __func__);
3619 }
3620
3621 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3622 if (IsDynamicTensor(outputInfo))
3623 {
3624 return Fail("%s: Dynamic output tensors are not supported", __func__);
3625 }
3626
3627 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3628 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3629
3630 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3631 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3632 {
3633 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3634 }
3635
3636 std::vector<int32_t> blockShape;
3637 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3638 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3639 {
3640 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3641 }
3642
3643 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3644 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3645 {
3646 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3647 }
3648
3649 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3650 std::vector<int32_t> paddings;
3651 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3652 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3653 {
3654 int paddingBeforeInput = paddings[i];
3655 int paddingAfterInput = paddings[i + 1];
3656 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3657 {
3658 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3659 }
3660
3661 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3662 }
3663
3664 armnn::SpaceToBatchNdDescriptor descriptor;
3665 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3666 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3667 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3668
3669 if (Is12Operand(*output))
3670 {
3671 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3672 }
3673
3674 bool isSupported = false;
3675 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3676 IsSpaceToBatchNdSupported,
3677 data.m_Backends,
3678 isSupported,
3679 inputInfo,
3680 outputInfo,
3681 descriptor);
3682 if (!isSupported)
3683 {
3684 return false;
3685 }
3686
3687 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3688 assert(layer != nullptr);
3689 input.Connect(layer->GetInputSlot(0));
3690
3691 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3692}
3693
saoste01b8471482018-10-10 09:44:51 +01003694} // namespace armnn_driver