blob: eea70d7b64ad243bc07eb7d949139315ae2141ba [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000015#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Shared state threaded through every operation converter while a HAL model
// is translated into an ArmNN network.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)       // network starts out empty; created later by the caller
    {}

    // Backends against which layer support is queried (see FORWARD_LAYER_SUPPORT_FUNC).
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Output slots recorded for converted operands (presumably indexed by operand
    // index — populated by the individual converters; confirm against callers).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operands (used by GetMemoryFromPool).
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Lightweight, possibly-invalid handle pairing a layer's output slot with the
// tensor info flowing through it; used to describe an input for a new layer.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True if this handle wraps a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Tensor info captured at construction time.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Holds an armnn::ConstTensor for a constant model operand, optionally owning
// a swizzled copy of the data when a permutation was required.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation applied to the data before pinning it.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: the pin may own the swizzled data buffer.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string passed straight to the Android debug log.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Iterates over 'backends' in order and sets 'supported' to true at the first
// backend that accepts the layer; logs each rejection reason along the way.
// An InvalidArgumentException from the support check is re-thrown with location info.
// (No comments inside the macro body: '//' before a '\' continuation would
// swallow the rest of the macro, since line splicing precedes comment removal.)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000169template<typename HalOperand>
170armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
198
// V1_0 has no BOOL operand type, so this overload always reports false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
203
// Overload resolution helper: a V1_0 operand is never a HAL 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
208
#ifdef ARMNN_ANDROID_NN_V1_2

// From HAL 1.2 onwards BOOL is a real operand type.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
223
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100224template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000225armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
226 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100227 armnn::TensorInfo reshapeInfo)
228{
229 armnn::ReshapeDescriptor reshapeDescriptor;
230 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
231
232 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
233 BOOST_ASSERT(reshapeLayer != nullptr);
234
235 // Attach the input layer to the reshape layer
236 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
237 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
238
239 return *reshapeLayer;
240}
241
// Connects 'input0' and 'input1' to the two input slots of 'startLayer'. When
// the inputs have different ranks, a reshape layer is inserted in front of the
// lower-rank input that prepends degenerate (size-1) dimensions, so the shapes
// can be broadcast against each other.
// @return false if the required reshape is not supported by any backend;
//         true once both inputs are connected.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller shape into the trailing dimensions; the leading
    // 'sizeDifference' dimensions stay at 1.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    // Bail out before touching the network if no backend supports the reshape.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
334
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000335void CalcPadding(uint32_t input,
336 uint32_t kernel,
337 uint32_t stride,
338 uint32_t& outPadHead,
339 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100340 android::nn::PaddingScheme scheme)
341{
342 int32_t padHead;
343 int32_t padTail;
344 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
345 outPadHead = boost::numeric_cast<uint32_t>(padHead);
346 outPadTail = boost::numeric_cast<uint32_t>(padTail);
347}
348
#ifdef ARMNN_ANDROID_NN_V1_2

// Dilation-aware padding overload, available from HAL 1.2 onwards.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head = 0;
    int32_t tail = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

// Padding helper for transpose convolution; results stay signed, so the
// runtime's values are forwarded without any cast.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
368
Matthew Bentham912b3622019-05-03 15:49:14 +0100369Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100370{
371 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100372 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100373 shape.dimensions = operand.dimensions;
374 shape.scale = operand.scale;
375 shape.offset = operand.zeroPoint;
376 return shape;
377}
378
#ifdef ARMNN_ANDROID_NN_V1_2

// Translates a V1_2 operand into the NN runtime's Shape representation.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
392
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        // Apply the per-channel scale update in place.
        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Within a 1% relative tolerance the mismatch is silently corrected;
            // a larger mismatch is left untouched for ArmNN to reject later.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
432
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Swaps dimensions 1 and 2 of a 4D tensor; self-inverse.
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
// RotateTensorLeft and RotateTensorRight are inverses of each other.
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100440
441template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000442armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
443 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100444{
445 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000446 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100447
448 BOOST_ASSERT(layer != nullptr);
449
450 // Connect input to swizzle layer
451 input.Connect(layer->GetInputSlot(0));
452
453 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000454 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100455 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
456
457 return *layer;
458}
459
arovir01b0717b52018-09-05 17:03:25 +0100460bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
461 const armnn::TensorShape & outputShape,
462 uint32_t concatDim)
463{
464 // Validate the output shape is correct given the input shapes (which have just been validated)
465 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
466 if (outputShape.GetNumDimensions() != numDimensions)
467 {
468 return Fail("%s: Output shape has wrong number of dimensions", __func__);
469 }
470
471 unsigned int outputSizeAlongConcatenatedDimension = 0;
472 for (unsigned int i = 0; i < inputShapes.size(); i++)
473 {
474 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
475 }
476
477 for (unsigned int i = 0; i < numDimensions; ++i)
478 {
479 if (i == concatDim)
480 {
481 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
482 {
483 return Fail(
484 "%s: Invalid output shape for dimension %d (%d != %d)",
485 __func__,
486 i,
487 outputShape[i],
488 outputSizeAlongConcatenatedDimension);
489 }
490 }
491 else
492 {
493 if (outputShape[i] != inputShapes[0][i])
494 {
495 return Fail("%s: Invalid output shape", __func__);
496 }
497 }
498 }
499
500 return true;
501}
502
503bool RequiresReshape(armnn::TensorShape & inputShape)
504{
505 return inputShape.GetNumDimensions() < 3;
506}
507
arovir01b0717b52018-09-05 17:03:25 +0100508void SwizzleInputs(armnn::INetwork& network,
509 std::vector<LayerInputHandle>& inputs,
510 std::vector<armnn::TensorShape>& inputShapes,
511 const armnn::PermutationVector& mapping)
512{
513 if (!mapping.IsEqual(IdentityPermutation4D))
514 {
515 size_t nInputs = inputs.size();
516 for (size_t i=0; i<nInputs; ++i)
517 {
518 // add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000519 armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
arovir01b0717b52018-09-05 17:03:25 +0100520 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
521 auto& outputInfo = outputSlot.GetTensorInfo();
522 // replace inputs with the swizzled ones
523 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
524 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
525 }
526 }
527}
528
// Checks that transposing every input with 'mapping' is supported by at least
// one backend and, if so, inserts the transpose layers via SwizzleInputs.
// Returns false (leaving the network untouched) if any transpose is unsupported.
// A no-op returning true when 'mapping' is the 4D identity permutation.
bool CheckReshapeSupported(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping,
                           const armnn::TensorInfo& outputInfo)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;

            // NOTE(review): the support query pairs each input's info with the
            // overall 'outputInfo' rather than that input's transposed shape —
            // confirm this is the intended output info for the transpose check.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
562
563
narpra01f176d5a2018-11-18 20:17:48 +0000564bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
565 int32_t & concatDimension,
566 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100567{
narpra01f176d5a2018-11-18 20:17:48 +0000568 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100569 BOOST_ASSERT(numberOfDimensions >= 3);
570
571 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000572 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
573 // or along dimension 0 or 2 for a 3-D tensor.
574 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100575 {
narpra01f176d5a2018-11-18 20:17:48 +0000576 concatDimension = 1;
577 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
578 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100579 }
narpra01f176d5a2018-11-18 20:17:48 +0000580 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100581 {
narpra01f176d5a2018-11-18 20:17:48 +0000582 concatDimension = 0;
583 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
584 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100585 }
narpra01f176d5a2018-11-18 20:17:48 +0000586 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100587}
588
589} // anonymous namespace
590
591namespace armnn_driver
592{
593
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
604
605} // namespace armnn_driver
606
607///
608/// Utility templates
609///
610
611namespace armnn_driver
612{
613
614using namespace android::nn;
615
// Looks up the operand feeding input 'inputIndex' of 'operation'.
// @return Pointer into model.operands, or nullptr when the index is out of
//         range (a failure is logged unless failOnIndexOutOfBounds is false).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu: passing uint32_t and size_t through "%i" varargs is
            // undefined behaviour on LP64 targets.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
637
// Looks up the operand receiving output 'outputIndex' of 'operation'.
// @return Pointer into model.operands, or nullptr (with a logged failure)
//         when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu: passing uint32_t and size_t through "%i" varargs is
        // undefined behaviour on LP64 targets.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
657
// Returns a read-only pointer to a constant operand's raw data, resolved either
// from the model's inline operandValues (CONSTANT_COPY) or from a memory pool
// (CONSTANT_REFERENCE). Returns nullptr without logging for an absent optional
// operand, and nullptr with a logged failure for any other lifetime.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A missing non-optional value is handled as an error by the default case.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
704
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100705template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100706 typename HalOperation = typename HalPolicy::Operation,
707 typename HalModel = typename HalPolicy::Model,
708 typename HalOperandType = typename HalPolicy::OperandType>
709bool GetOperandType(const HalOperation& operation,
710 uint32_t inputIndex,
711 const HalModel& model,
712 HalOperandType& type)
713{
714 using HalOperand = typename HalPolicy::Operand;
715
716 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
717 if (!operand)
718 {
719 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
720 }
721
722 type = operand->type;
723 return true;
724}
725
// An operand counts as "constant" when its data is baked into the model
// (copied or referenced) or deliberately absent (NO_VALUE, for optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
738
// Wraps a constant HAL operand as a ConstTensorPin, validating its type and
// lifetime and resolving its data pointer.
// @param dimensionMappings Permutation applied when pinning the data (and to
//        the quantization dimension for per-axis quantized tensors).
// @param overrideTensorShape If non-null, replaces the operand's own shape.
// @param optional If true, a missing value yields an "optional" invalid pin
//        rather than an error.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Non-optional operands must have a constant lifetime.
    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        // Remap the quantization axis to follow the data permutation.
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
787
788template<typename HalPolicy,
789 typename HalOperation = typename HalPolicy::Operation,
790 typename HalModel = typename HalPolicy::Model>
791ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
792 uint32_t inputIndex,
793 const HalModel& model,
794 const ConversionData& data,
795 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
796 const armnn::TensorShape* overrideTensorShape = nullptr,
797 bool optional = false)
798{
799 using HalOperand = typename HalPolicy::Operand;
800
801 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
802 if (!operand)
803 {
804 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
805 return ConstTensorPin();
806 }
807 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
808 model,
809 data,
810 dimensionMappings,
811 overrideTensorShape,
812 optional);
813}
814
815template<typename HalPolicy,
816 typename OutputType,
817 typename HalOperandType = typename HalPolicy::OperandType,
818 typename HalOperation = typename HalPolicy::Operation,
819 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100820bool GetInputScalar(const HalOperation& operation,
821 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100822 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100823 OutputType& outValue,
824 const HalModel& model,
825 const ConversionData& data)
826{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100827 using HalOperand = typename HalPolicy::Operand;
828
829 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100830 if (!operand)
831 {
832 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
833 }
834
835 if (operand->type != type)
836 {
837 return Fail("%s: unexpected operand type: %s (should be %s)",
838 __func__, toString(operand->type).c_str(), toString(type).c_str());
839 }
840
841 if (operand->location.length != sizeof(OutputType))
842 {
843 return Fail("%s: incorrect operand location length: %i (should be %i)",
844 __func__, operand->location.length, sizeof(OutputType));
845 }
846
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100847 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100848 if (!valueAddress)
849 {
850 return Fail("%s: failed to get address for operand", __func__);
851 }
852
853 outValue = *(static_cast<const OutputType*>(valueAddress));
854 return true;
855}
856
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100857template<typename HalPolicy,
858 typename HalOperation = typename HalPolicy::Operation,
859 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100860bool GetInputInt32(const HalOperation& operation,
861 uint32_t inputIndex,
862 int32_t& outValue,
863 const HalModel& model,
864 const ConversionData& data)
865{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100866 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100867}
868
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100869template<typename HalPolicy,
870 typename HalOperation = typename HalPolicy::Operation,
871 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100872bool GetInputFloat32(const HalOperation& operation,
873 uint32_t inputIndex,
874 float& outValue,
875 const HalModel& model,
876 const ConversionData& data)
877{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100878 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100879}
880
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100881template<typename HalPolicy,
882 typename HalOperation = typename HalPolicy::Operation,
883 typename HalOperandType = typename HalPolicy::OperandType,
884 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100885bool GetInputActivationFunctionImpl(const HalOperation& operation,
886 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100887 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100888 ActivationFn& outActivationFunction,
889 const HalModel& model,
890 const ConversionData& data)
891{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100892 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100893 {
894 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
895 __func__,
896 toString(type).c_str(),
897 toString(OperandType::INT32).c_str(),
898 toString(OperandType::TENSOR_INT32).c_str());
899 }
900
901 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100902 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100903 {
904 return Fail("%s: failed to get activation input value", __func__);
905 }
906 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
907 return true;
908}
909
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100910template<typename HalPolicy,
911 typename HalOperation = typename HalPolicy::Operation,
912 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100913bool GetInputActivationFunction(const HalOperation& operation,
914 uint32_t inputIndex,
915 ActivationFn& outActivationFunction,
916 const HalModel& model,
917 const ConversionData& data)
918{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100919 return GetInputActivationFunctionImpl<HalPolicy>(operation,
920 inputIndex,
921 HalPolicy::OperandType::INT32,
922 outActivationFunction,
923 model,
924 data);
arovir01b0717b52018-09-05 17:03:25 +0100925}
926
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100927template<typename HalPolicy,
928 typename HalOperation = typename HalPolicy::Operation,
929 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100930bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
931 uint32_t inputIndex,
932 ActivationFn& outActivationFunction,
933 const HalModel& model,
934 const ConversionData& data)
935{
936 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100937 return GetInputActivationFunctionImpl<HalPolicy>(operation,
938 inputIndex,
939 HalPolicy::OperandType::INT32,
940 outActivationFunction,
941 model,
942 data);
arovir01b0717b52018-09-05 17:03:25 +0100943}
944
945
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100946template<typename HalPolicy,
947 typename HalOperation = typename HalPolicy::Operation,
948 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100949bool GetOptionalInputActivation(const HalOperation& operation,
950 uint32_t inputIndex,
951 ActivationFn& activationFunction,
952 const HalModel& model,
953 const ConversionData& data)
954{
955 if (operation.inputs.size() <= inputIndex)
956 {
957 activationFunction = ActivationFn::kActivationNone;
958 }
959 else
960 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100961 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100962 {
963 return Fail("%s: Operation has invalid inputs", __func__);
964 }
965 }
966 return true;
967}
968
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100969template<typename HalPolicy,
970 typename ConvolutionDescriptor,
971 typename HalOperation = typename HalPolicy::Operation,
972 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100973bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
974 uint32_t dilationXIndex,
975 ConvolutionDescriptor& descriptor,
976 const HalModel& model,
977 const ConversionData& data)
978{
979 bool success = true;
980 if (operation.inputs.size() >= dilationXIndex + 2)
981 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100982 success &= GetInputScalar<HalPolicy>(operation,
983 dilationXIndex,
984 HalPolicy::OperandType::INT32,
985 descriptor.m_DilationX,
986 model,
987 data);
988 success &= GetInputScalar<HalPolicy>(operation,
989 dilationXIndex + 1,
990 HalPolicy::OperandType::INT32,
991 descriptor.m_DilationY,
992 model,
993 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100994 }
995
996 return success;
997}
998
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100999template<typename HalPolicy,
1000 typename HalOperand = typename HalPolicy::Operand,
1001 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001002bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001003 std::vector<int32_t>& outValues,
1004 const HalModel& model,
1005 const ConversionData& data)
1006{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001007 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001008 {
1009 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1010 }
1011
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001012 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001013 if (!startAddress)
1014 {
1015 return Fail("%s: failed to get operand address", __func__, operand.type);
1016 }
1017
1018 // Check number of bytes is sensible
1019 const uint32_t numBytes = operand.location.length;
1020 if (numBytes % sizeof(int32_t) != 0)
1021 {
1022 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1023 __func__, numBytes, sizeof(int32_t));
1024 }
1025
1026 outValues.resize(numBytes / sizeof(int32_t));
1027 memcpy(outValues.data(), startAddress, numBytes);
1028 return true;
1029}
1030
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001031template<typename HalPolicy,
1032 typename HalOperation = typename HalPolicy::Operation,
1033 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001034bool GetInputPaddingScheme(const HalOperation& operation,
1035 uint32_t inputIndex,
1036 PaddingScheme& outPaddingScheme,
1037 const HalModel& model,
1038 const ConversionData& data)
1039{
1040 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001041 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001042 {
1043 return Fail("%s: failed to get padding scheme input value", __func__);
1044 }
1045
1046 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1047 return true;
1048}
1049
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Resolves input operand `inputIndex` of `operation` to an ArmNN output slot,
// wrapped in a LayerInputHandle. Model inputs and intermediate tensors are
// looked up in data.m_OutputSlotForOperand; constant operands are materialised
// as a new Constant layer. Returns an invalid handle (after Fail) on any error.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        // How the tensor is wired up depends on the operand's lifetime.
        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    // Materialise the constant as a layer and hand back its output slot.
                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1162
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001163template<typename HalPolicy,
1164 typename HalOperation = typename HalPolicy::Operation,
1165 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001166bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1167 uint32_t operationOutputIndex,
1168 armnn::IConnectableLayer& layer,
1169 uint32_t layerOutputIndex,
1170 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001171 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001172{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001173 using HalOperand = typename HalPolicy::Operand;
1174
1175 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001176 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1177 {
1178 return false;
1179 }
1180
1181 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1182
1183 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1184 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1185
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001186 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001187
1188 return true;
1189}
1190
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001191template<typename HalPolicy,
1192 typename HalOperation = typename HalPolicy::Operation,
1193 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001194armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1195 uint32_t inputIndex,
1196 const HalModel& model,
1197 ConversionData& data)
1198{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001199 using HalOperand = typename HalPolicy::Operand;
1200
1201 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001202 if (!operand)
1203 {
1204 return armnn::DataLayout::NHWC;
1205 }
1206
1207 if (!IsBool(*operand))
1208 {
1209 return armnn::DataLayout::NHWC;
1210 }
1211
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001212 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001213 if (!valueAddress)
1214 {
1215 return armnn::DataLayout::NHWC;
1216 }
1217
1218 if (*(static_cast<const bool*>(valueAddress)))
1219 {
1220 return armnn::DataLayout::NCHW;
1221 }
1222 else
1223 {
1224 return armnn::DataLayout::NHWC;
1225 }
1226}
1227
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001228template<typename HalPolicy,
1229 typename HalOperation = typename HalPolicy::Operation,
1230 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001231bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1232 uint32_t outputIndex,
1233 armnn::IConnectableLayer& layer,
1234 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001235 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001236{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001237 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1238 outputIndex,
1239 layer,
1240 outputIndex,
1241 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001242 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001243}
1244
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001245template<typename HalPolicy,
1246 typename HalOperation = typename HalPolicy::Operation,
1247 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001248bool ConvertToActivation(const HalOperation& operation,
1249 const char* operationName,
1250 const armnn::ActivationDescriptor& activationDesc,
1251 const HalModel& model,
1252 ConversionData& data)
1253{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001254 using HalOperand = typename HalPolicy::Operand;
1255
1256 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001257 if (!input.IsValid())
1258 {
1259 return Fail("%s: Input 0 is invalid", operationName);
1260 }
1261
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001262 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001263 if (!outputOperand)
1264 {
1265 return false;
1266 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001267
1268 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001269 if (IsDynamicTensor(outInfo))
1270 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001271 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001272 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001273
1274 bool isSupported = false;
1275 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1276 IsActivationSupported,
1277 data.m_Backends,
1278 isSupported,
1279 input.GetTensorInfo(),
1280 outInfo,
1281 activationDesc);
1282 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001283 {
1284 return false;
1285 }
1286
1287 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1288 BOOST_ASSERT(layer != nullptr);
1289 input.Connect(layer->GetInputSlot(0));
1290
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001291 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001292}
1293
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001294template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001295 typename HalOperation = typename HalPolicy::Operation,
1296 typename HalModel = typename HalPolicy::Model>
1297bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1298{
1299 armnn::ActivationDescriptor desc;
1300 desc.m_Function = armnn::ActivationFunction::ReLu;
1301
1302 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1303}
1304
1305template<typename HalPolicy,
1306 typename HalOperation = typename HalPolicy::Operation,
1307 typename HalModel = typename HalPolicy::Model>
1308bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1309{
1310 armnn::ActivationDescriptor desc;
1311 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1312 desc.m_A = 1.0f;
1313 desc.m_B = -1.0f;
1314
1315 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1316}
1317
1318template<typename HalPolicy,
1319 typename HalOperation = typename HalPolicy::Operation,
1320 typename HalModel = typename HalPolicy::Model>
1321bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1322{
1323 armnn::ActivationDescriptor desc;
1324 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1325 desc.m_A = 6.0f;
1326
1327 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1328}
1329
1330template<typename HalPolicy,
1331 typename HalOperation = typename HalPolicy::Operation,
1332 typename HalModel = typename HalPolicy::Model>
1333bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1334{
1335 armnn::ActivationDescriptor desc;
1336 desc.m_Function = armnn::ActivationFunction::TanH;
1337 desc.m_A = 1.0f; // android nn does not support tanH parameters
1338 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1339
1340 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1341}
1342
1343template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001344 typename HalOperation = typename HalPolicy::Operation,
1345 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001346bool ConvertPaddings(const HalOperation& operation,
1347 const HalModel& model,
1348 ConversionData& data,
1349 unsigned int rank,
1350 armnn::PadDescriptor& padDescriptor)
1351{
1352 using HalOperand = typename HalPolicy::Operand;
1353
1354 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1355 if (!paddingsOperand)
1356 {
1357 return Fail("%s: Could not read paddings operand", __func__);
1358 }
1359
1360 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1361 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1362 {
1363 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1364 }
1365
1366 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001367 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1368 {
1369 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1370 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001371
1372 // add padding for each dimension of input tensor.
1373 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1374 {
1375 int paddingBeforeInput = paddings[i];
1376 int paddingAfterInput = paddings[i + 1];
1377
1378 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1379 {
1380 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1381 }
1382
1383 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1384 }
1385
1386 return true;
1387}
1388
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
// Converts an NNAPI 2-D pooling operation (max/avg/L2, selected by poolType)
// into an ArmNN Pooling2d layer followed by any fused activation. Supports
// both the explicit-padding signature (>= 10 inputs) and the padding-scheme
// signature (7 inputs), each with an optional trailing data-layout operand.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Default layout is NHWC; may be overridden below by an optional operand.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 models may append an optional data-layout operand at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 models may append an optional data-layout operand at index 7.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Derive explicit padding from the implicit padding scheme using the
        // spatial dimensions of the (layout-dependent) input shape.
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) and wire the input through.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1507
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001508template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001509 typename HalOperation = typename HalPolicy::Operation,
1510 typename HalModel = typename HalPolicy::Model>
1511bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001512{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001513 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001514
1515 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1516 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1517
1518 if (!input0.IsValid() || !input1.IsValid())
1519 {
1520 return Fail("%s: Operation has invalid inputs", __func__);
1521 }
1522
1523 // The FuseActivation parameter is always the input index 2
1524 // and it should be optional
1525 ActivationFn activationFunction;
1526 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1527 {
1528 return Fail("%s: Operation has invalid inputs", __func__);
1529 }
1530
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001531 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001532 if (!outputOperand)
1533 {
1534 return false;
1535 }
1536
1537 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1538 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1539
1540 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1541 if (IsDynamicTensor(outputInfo))
1542 {
1543 return Fail("%s: Dynamic output tensors are not supported", __func__);
1544 }
1545
1546 bool isSupported = false;
1547 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1548 IsAdditionSupported,
1549 data.m_Backends,
1550 isSupported,
1551 inputInfo0,
1552 inputInfo1,
1553 outputInfo);
1554 if (!isSupported)
1555 {
1556 return false;
1557 }
1558
1559 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1560 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1561
1562 if (endLayer != nullptr)
1563 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00001564 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001565 if (!isReshapeSupported)
1566 {
1567 return false;
1568 }
1569
Mike Kelly46272802019-08-14 17:00:48 +01001570 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1571 }
1572 else
1573 {
1574 return Fail("%s: ProcessActivation failed", __func__);
1575 }
1576}
1577
1578template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001579 typename HalOperation = typename HalPolicy::Operation,
1580 typename HalModel = typename HalPolicy::Model>
1581bool ConvertArgMinMax(const HalOperation& operation,
1582 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001583 ConversionData& data,
1584 armnn::ArgMinMaxFunction argMinMaxFunction)
1585{
1586 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1587
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001588 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001589 using HalOperandType = typename HalPolicy::OperandType;
1590
1591 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1592
1593 if (!input0.IsValid())
1594 {
1595 return Fail("%s: Operation has invalid inputs", __func__);
1596 }
1597
1598 int32_t axis;
1599 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1600 {
1601 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1602 }
1603
1604 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1605 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1606
1607 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1608 {
1609 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1610 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1611 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1612 return Fail("%s: Axis must be in range [-n, n)", __func__);
1613 }
1614
1615 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1616 if (!output)
1617 {
1618 return Fail("%s: Could not read output 0", __func__);
1619 }
1620
1621 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1622
1623 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1624 if (IsDynamicTensor(outputInfo))
1625 {
1626 return Fail("%s: Dynamic output tensors are not supported", __func__);
1627 }
1628
1629 armnn::ArgMinMaxDescriptor descriptor;
1630 descriptor.m_Function = argMinMaxFunction;
1631 descriptor.m_Axis = axis;
1632
1633 bool isSupported = false;
1634 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1635 IsArgMinMaxSupported,
1636 data.m_Backends,
1637 isSupported,
1638 inputInfo0,
1639 outputInfo,
1640 descriptor);
1641 if (!isSupported)
1642 {
1643 return false;
1644 }
1645
1646 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1647 assert(layer != nullptr);
1648
1649 input0.Connect(layer->GetInputSlot(0));
1650
1651 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1652}
1653
/// Converts an NNAPI CONCATENATION operation into an ArmNN Concat layer.
///
/// The operation's inputs are N tensors followed by one INT32 scalar holding
/// the concatenation axis (negative values allowed, TensorFlow-style).
/// Low-rank inputs (rank 1/2) are first expanded to rank 3 with reshape
/// layers; if the concat axis is one ArmNN cannot concatenate directly, the
/// inputs are transposed before the concat and transposed back afterwards,
/// and any added dimensions are removed again with a final reshape.
/// @return true on success, false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The concat axis is the last input operand.
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // Note: outputInfo/outputShape are taken by value because they are
    // rewritten below when inputs are reshaped or permuted.
    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was expanded to rank 3 and by how many
    // leading dimensions, so the expansion can be undone after the concat.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            // Verify the backend supports the rank-expanding reshape before adding it.
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);
            if (!isSupported)
            {
                return false;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        // CreateDescriptorForConcatenation throws on invalid shape combinations.
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to each input's TensorInfo for the support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // The inputs were swizzled before the concat; transpose the concat
        // output back to the original layout using the inverse permutation.
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;

        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   outputInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network,
                                                                     layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();

        // Verify the backend supports the rank-restoring reshape before adding it.
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReshapeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   layer->GetOutputSlot(0).GetTensorInfo(),
                                   afterConcatInfo,
                                   reshapeDescriptor);
        if (!isSupported)
        {
            return false;
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1941
/// Converts an NNAPI CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Two operand layouts are accepted:
///  - 10 inputs: explicit padding (left/right/top/bottom), strides, activation;
///  -  7 inputs: implicit padding scheme, strides, activation (padding is then
///    computed here via CalcPadding).
/// Weights (input 1) and bias (input 2) must be constant operands; dynamic
/// output shapes are rejected.
/// @return true on success, false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weights scales if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding form: read all four pad values plus strides.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding form: derive the pad values from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel H/W come from dims 1/2 of the weights tensor and input H/W
        // from dims 1/2 of the NHWC input (weights presumably laid out
        // [ depth_out, H, W, depth_in ] per NNAPI — confirm against spec).
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2061
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002062template<typename HalPolicy,
2063 typename HalOperation = typename HalPolicy::Operation,
2064 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002065bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2066{
2067 using HalOperand = typename HalPolicy::Operand;
2068 using HalOperandType = typename HalPolicy::OperandType;
2069
2070 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2071 if (!input.IsValid() )
2072 {
2073 return Fail("%s: Operation has invalid inputs", __func__);
2074 }
2075
2076 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2077 unsigned int rank = inputInfo.GetNumDimensions();
2078 if (rank != 4)
2079 {
2080 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2081 }
2082
2083 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2084 if (!output)
2085 {
2086 return Fail("%s: Could not read output 0", __func__);
2087 }
2088
2089 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2090 if (IsDynamicTensor(outputInfo))
2091 {
2092 return Fail("%s: Dynamic output tensors are not supported", __func__);
2093 }
2094
2095 armnn::DepthToSpaceDescriptor descriptor;
2096
2097 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2098 if (descriptor.m_BlockSize <= 1)
2099 {
2100 return Fail("%s: Block size must be at least 1 in all dimensions");
2101 }
2102
2103 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2104 if (Is12Operand(*output))
2105 {
2106 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2107 }
2108
2109 bool isSupported = false;
2110 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2111 IsDepthToSpaceSupported,
2112 data.m_Backends,
2113 isSupported,
2114 inputInfo,
2115 outputInfo,
2116 descriptor);
2117 if (!isSupported)
2118 {
2119 return false;
2120 }
2121
2122 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2123 assert(layer != nullptr);
2124 input.Connect(layer->GetInputSlot(0));
2125
2126 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2127}
2128
/// Converts an NNAPI DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Two operand layouts are accepted:
///  - 11 inputs: explicit padding (left/right/top/bottom), strides,
///    depth multiplier (input 9, not read here), activation at input 10;
///  -  8 inputs: implicit padding scheme, strides, depth multiplier
///    (input 6, not read here), activation at input 7.
/// The weights operand is reinterpreted from the AndroidNN [ 1, H, W, I*M ]
/// layout into [ H, W, I, M ] and then swizzled to ArmNN's [ M, I, H, W ].
/// @return true on success, false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // where I is the input channel count (input dim 3) and M is the depth
    // multiplier derived as (I * M) / I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weights scales if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding form.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding form: derive pad values from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // After the swizzle above the weights are [ M, I, H, W ], so W is
        // dim 3 and H is dim 2; input H/W come from the NHWC input tensor.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2273
Mike Kelly3c673942019-07-25 09:26:06 +01002274template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002275 typename HalOperation = typename HalPolicy::Operation,
2276 typename HalModel = typename HalPolicy::Model>
2277bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002278{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002279 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002280
2281 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2282 if (!input.IsValid())
2283 {
2284 return Fail("%s: Operation has invalid input", __func__);
2285 }
2286
Sadik Armagan98c0f662019-11-21 15:54:36 +00002287 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2288 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2289 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2290 {
2291 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2292 }
2293
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002294 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002295 if (!outputOperand)
2296 {
2297 return Fail("%s: Operation has invalid outputs", __func__);
2298 }
2299
2300 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2301 if (IsDynamicTensor(outputInfo))
2302 {
2303 return Fail("%s: Dynamic output tensors are not supported", __func__);
2304 }
2305
2306 bool isSupported = false;
2307 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2308 IsDequantizeSupported,
2309 data.m_Backends,
2310 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002311 inputInfo,
2312 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002313 if (!isSupported)
2314 {
2315 return false;
2316 }
2317
2318 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2319 assert(layer != nullptr);
2320 input.Connect(layer->GetInputSlot(0));
2321
2322 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2323}
2324
/// Converts an Android NN DIV operation into an ArmNN Division layer.
/// Inputs 0 and 1 are the numerator/denominator tensors; input 2 is the
/// (optional) fused activation function. Fails on invalid operands,
/// unsupported backends, or dynamic output tensors.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Ask the configured backends whether they can execute this division.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDivisionSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        // Insert reshape layers where needed so the two inputs broadcast correctly.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    return Fail("%s: ProcessActivation failed", __func__);
}
2388
2389template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002390 typename HalOperation = typename HalPolicy::Operation,
2391 typename HalModel = typename HalPolicy::Model>
2392bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002393{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002394 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002395
2396 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2397 if (!input.IsValid())
2398 {
2399 return Fail("%s: Operation has invalid inputs", __func__);
2400 }
2401
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002402 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002403 if (!outputOperand)
2404 {
2405 return Fail("%s: Operation has invalid outputs", __func__);
2406 }
2407
2408 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2409 if (IsDynamicTensor(outputInfo))
2410 {
2411 return Fail("%s: Dynamic output tensors are not supported", __func__);
2412 }
2413
2414 bool isSupported = false;
2415 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2416 IsFloorSupported,
2417 data.m_Backends,
2418 isSupported,
2419 input.GetTensorInfo(),
2420 outputInfo);
2421 if (!isSupported)
2422 {
2423 return false;
2424 }
2425
2426 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2427 assert(layer != nullptr);
2428 input.Connect(layer->GetInputSlot(0));
2429
2430 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2431}
2432
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so a V1_0 operand can
// never be QSYMM8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2437
#ifdef ARMNN_ANDROID_NN_V1_2

// From HAL 1.2 onwards an operand may be an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2446
// Outcome of DequantizeIfRequired (see below).
enum class DequantizeStatus
{
    SUCCESS,          // weights came from a DEQUANTIZE op and were dequantized here
    NOT_REQUIRED,     // weights are already constant, or no matching DEQUANTIZE found
    INVALID_OPERAND   // the weights operand could not be read at all
};

// (dequantized float data, buffer size in bytes, tensor info, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2455
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002456template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002457 typename HalOperation = typename HalPolicy::Operation,
2458 typename HalModel = typename HalPolicy::Model>
2459DequantizeResult DequantizeIfRequired(size_t operand_index,
2460 const HalOperation& operation,
2461 const HalModel& model,
2462 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002463{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002464 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002465
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002466 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002467 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002468 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002469 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002470 }
2471
2472 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2473 {
2474 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002475 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002476 }
2477
2478 const size_t weightsInputIndex = operation.inputs[operand_index];
2479
2480 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2481 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2482 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2483 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002484 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002485 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002486 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2487 {
2488 continue;
2489 }
2490
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002491 size_t outOpIndex = weightsInputIndex + 1;
2492 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002493 {
2494 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002495 }
2496
2497 if (outOpIndex != weightsInputIndex)
2498 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002499 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002500 }
2501
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002502 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002503 BOOST_ASSERT(operand);
2504
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002505 if (!IsQSymm8(*operand))
2506 {
2507 // Only supporting dequantize from QSYMM8 to FLOAT
2508 break;
2509 }
2510
2511 // Allocate a new buffer for the dequantized data and manually dequantize
2512 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2513 if (!startValue)
2514 {
2515 // Failed to get the operand address
2516 break;
2517 }
2518
2519 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2520 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002521 const float quantizationScale = operand->scale;
2522
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002523 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2524 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2525 {
2526 float* dstPtr = dequantizedBuffer.get();
2527 BOOST_ASSERT(dstPtr);
2528 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2529 }
2530
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002531 // Construct tensor info for dequantized ConstTensor
2532 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2533 operand->dimensions.data(),
2534 armnn::DataType::Float32);
2535
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002536 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2537 std::move(tensorInfo),
2538 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002539 }
2540
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002541 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002542}
2543
/// Builds a ConstTensorPin for the operand at @p operandIndex, dequantizing
/// QSYMM8 weights produced by a preceding DEQUANTIZE operation when required.
/// @param optional  forwarded to ConvertOperationInputToConstTensorPin so a
///                  missing operand can be treated as an optional input.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
                                               const HalModel& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false)
{
    DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            // Weights are already constant; read them straight from the model.
            return ConvertOperationInputToConstTensorPin<HalPolicy>(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            // Wrap the freshly dequantized float buffer and its tensor info in a pin.
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}
2576
2577
/// Converts an Android NN FULLY_CONNECTED operation into an ArmNN
/// FullyConnected layer. Weights (input 1) may need dequantizing if they are
/// produced by a DEQUANTIZE op; bias is input 2 (1D); input 3 is the fused
/// activation. Inputs with more than 2 dimensions are flattened via an
/// inserted Reshape layer before the FullyConnected layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be the output of a DEQUANTIZE op; dequantize them here if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Collapse an N-D input into the 2D shape FullyConnected expects.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    // Validate that input/weights/output shapes are mutually consistent.
    if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                    weights.GetInfo().GetShape(),
                                    outputInfo.GetShape(),
                                    desc.m_TransposeWeightMatrix))
    {
        return Fail("%s: Expected outputShape does not match actual outputShape", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape layer to flatten the input before the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2696
2697template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002698 typename HalOperation = typename HalPolicy::Operation,
2699 typename HalModel = typename HalPolicy::Model>
2700bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002701{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002702 using HalOperand = typename HalPolicy::Operand;
2703
Mike Kelly999e2092019-08-15 10:46:46 +01002704 if (operation.inputs.size() != 1)
2705 {
2706 return Fail("%s: Optional inputs are not supported", __func__);
2707 }
2708
Mike Kelly46272802019-08-14 17:00:48 +01002709 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2710 if (!input.IsValid())
2711 {
2712 return Fail("%s: Operation has invalid inputs", __func__);
2713 }
2714
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002715 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002716 if (!output)
2717 {
2718 return Fail("%s: Could not read output 0", __func__);
2719 }
2720
2721 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2722 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2723
2724 if (IsDynamicTensor(outputInfo))
2725 {
2726 return Fail("%s: Dynamic output tensors are not supported", __func__);
2727 }
2728 if (outputInfo.GetNumDimensions() != 4u)
2729 {
2730 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2731 }
2732
2733 armnn::L2NormalizationDescriptor desc;
2734 desc.m_DataLayout = armnn::DataLayout::NHWC;
2735
2736 bool isSupported = false;
2737 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2738 IsL2NormalizationSupported,
2739 data.m_Backends,
2740 isSupported,
2741 inputInfo,
2742 outputInfo,
2743 desc);
2744 if (!isSupported)
2745 {
2746 return false;
2747 }
2748
2749 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2750 assert(layer != nullptr);
2751 input.Connect(layer->GetInputSlot(0));
2752
2753 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2754}
2755
/// Converts an Android NN LOCAL_RESPONSE_NORMALIZATION operation into an
/// ArmNN Normalization layer (across-channel LocalBrightness, NHWC).
/// Exactly 5 inputs are expected: tensor, radius, bias(k), alpha, beta.
/// Rank-4 tensors only; dynamic outputs are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const HalOperation& operation,
                                       const HalModel& model,
                                       ConversionData& data)
{
    // The optional axis operand (HAL 1.2+) is not handled here.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius (input 1), k (2), alpha (3) and beta (4) into the descriptor.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2833
2834template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002835 typename HalOperation = typename HalPolicy::Operation,
2836 typename HalModel = typename HalPolicy::Model>
2837bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002838{
Mike Kelly46272802019-08-14 17:00:48 +01002839 armnn::ActivationDescriptor desc;
2840 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2841
2842 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2843}
2844
/// Converts an Android NN MEAN operation into an ArmNN Mean layer.
/// Input 1 is a 1D tensor of axes to reduce over (negative values wrap);
/// input 2 is the keep-dims flag. Dynamic outputs are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axis values onto their positive equivalents.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2921
2922template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002923 typename HalOperation = typename HalPolicy::Operation,
2924 typename HalModel = typename HalPolicy::Model>
2925bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002926{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002927 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002928
2929 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2930 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2931
2932 if (!input0.IsValid() || !input1.IsValid())
2933 {
2934 return Fail("%s: Operation has invalid inputs", __func__);
2935 }
2936
2937 // The FuseActivation parameter is always the input index 2
2938 // and it should be optional
2939 ActivationFn activationFunction;
2940 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2941 {
2942 return Fail("%s: Operation has invalid inputs", __func__);
2943 }
2944
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002945 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002946
2947 if (outputOperand == nullptr)
2948 {
2949 return false;
2950 }
2951
2952 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2953 if (IsDynamicTensor(outputInfo))
2954 {
2955 return Fail("%s: Dynamic output tensors are not supported", __func__);
2956 }
2957
2958 bool isSupported = false;
2959 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2960 IsMultiplicationSupported,
2961 data.m_Backends,
2962 isSupported,
2963 input0.GetTensorInfo(),
2964 input1.GetTensorInfo(),
2965 outputInfo);
2966 if (!isSupported)
2967 {
2968 return false;
2969 }
2970
2971 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2972 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2973
2974 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2975 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2976
2977 if (endLayer != nullptr)
2978 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00002979 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01002980 if (!isReshapeSupported)
2981 {
2982 return false;
2983 }
2984
Mike Kelly46272802019-08-14 17:00:48 +01002985 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2986 }
2987 else
2988 {
2989 return Fail("%s: ProcessActivation failed", __func__);
2990 }
2991}
2992
2993template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002994 typename HalOperation = typename HalPolicy::Operation,
2995 typename HalModel = typename HalPolicy::Model>
2996bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002997{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002998 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002999
Mike Kelly3c673942019-07-25 09:26:06 +01003000 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3001 if (!input.IsValid())
3002 {
3003 return Fail("%s: Operation has invalid inputs", __func__);
3004 }
3005
3006 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3007 unsigned int rank = inputInfo.GetNumDimensions();
3008
3009 armnn::PadDescriptor descriptor;
3010 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3011 {
3012 return Fail("%s: Could not convert paddings", __func__);
3013 }
3014
3015 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3016 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3017 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Derek Lamberti1a38cda2020-01-10 17:28:20 +00003018 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8)
Mike Kelly3c673942019-07-25 09:26:06 +01003019 {
3020 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3021 }
3022
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003023 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003024 if (!output)
3025 {
3026 return Fail("%s: Could not read output", __func__);
3027 }
3028
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003029 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003030 if (IsDynamicTensor(outputInfo))
3031 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003032 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01003033 }
3034
3035 bool isSupported = false;
3036 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3037 IsPadSupported,
3038 data.m_Backends,
3039 isSupported,
3040 inputInfo,
3041 outputInfo,
3042 descriptor);
3043 if (!isSupported)
3044 {
3045 return false;
3046 }
3047
3048 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3049 assert(layer != nullptr);
3050 input.Connect(layer->GetInputSlot(0));
3051 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
3052
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003053 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01003054}
3055
Mike Kelly0a879362019-07-29 16:56:31 +01003056template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003057 typename HalOperation = typename HalPolicy::Operation,
3058 typename HalModel = typename HalPolicy::Model>
3059bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003060{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003061 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003062
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003063 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3064 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3065 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003066
3067 if (inputOperand == nullptr
3068 || requestedShapeOperand == nullptr
3069 || outputOperand == nullptr)
3070 {
3071 return Fail("%s: Operation has invalid inputs", __func__);
3072 }
3073
3074 if (requestedShapeOperand->dimensions.size() != 1)
3075 {
3076 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3077 __func__, requestedShapeOperand->dimensions.size());
3078 }
3079
3080 std::vector<int32_t> targetDimensions;
3081 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3082 {
3083 return Fail("%s: Could not read values of input 1", __func__);
3084 }
3085
3086 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3087
3088 Shape requestedShape;
3089 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3090 // function that resolves these values into a fully specified tensor shape.
3091 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3092 {
3093 return Fail("%s: Failed to resolve the requested shape", __func__);
3094 }
3095
3096 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3097 if (!SameShape(requestedShape, outputOperandShape))
3098 {
3099 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3100 }
3101
3102 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3103 if (!input.IsValid())
3104 {
3105 return Fail("%s: Could not read input 0", __func__);
3106 }
3107
3108 armnn::ReshapeDescriptor reshapeDescriptor;
3109 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3110 requestedShape.dimensions.data());
3111
3112 bool isSupported = false;
3113 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3114 IsReshapeSupported,
3115 data.m_Backends,
3116 isSupported,
3117 input.GetTensorInfo(),
Kevin Mayaed08ac2019-12-12 16:33:31 +00003118 GetTensorInfoForOperand(*outputOperand),
Mike Kelly46272802019-08-14 17:00:48 +01003119 reshapeDescriptor);
3120 if (!isSupported)
3121 {
3122 return false;
3123 }
3124
3125 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3126 assert(layer != nullptr);
3127 input.Connect(layer->GetInputSlot(0));
3128
3129 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3130}
3131
3132template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003133 typename HalOperation = typename HalPolicy::Operation,
3134 typename HalModel = typename HalPolicy::Model>
3135bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003136{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003137 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003138
Mike Kelly0a879362019-07-29 16:56:31 +01003139 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3140 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3141
3142 if (!input0.IsValid() || !input1.IsValid())
3143 {
3144 return Fail("%s: Operation has invalid inputs", __func__);
3145 }
3146
3147 // The FuseActivation parameter is always the input index 2
3148 // and it should be optional
3149 ActivationFn activationFunction;
3150 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3151 {
3152 return Fail("%s: Operation has invalid inputs", __func__);
3153 }
3154
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003155 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003156 if (!output)
3157 {
3158 return Fail("%s: Could not read output 0", __func__);
3159 }
3160
3161 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3162 if (IsDynamicTensor(outputInfo))
3163 {
3164 return Fail("%s: Dynamic output tensors are not supported", __func__);
3165 }
3166
3167 bool isSupported = false;
3168 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3169 IsSubtractionSupported,
3170 data.m_Backends,
3171 isSupported,
3172 input0.GetTensorInfo(),
3173 input1.GetTensorInfo(),
3174 outputInfo);
3175 if (!isSupported)
3176 {
3177 return false;
3178 }
3179
3180 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3181 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3182
3183 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3184 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3185
3186 if (endLayer)
3187 {
Derek Lamberti6fd4ceb2019-12-19 15:45:35 +00003188 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
Sadik Armagan64b19b52019-08-19 09:49:58 +01003189 if (!isReshapeSupported)
3190 {
3191 return false;
3192 }
Mike Kelly0a879362019-07-29 16:56:31 +01003193 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3194 }
3195
3196 return Fail("%s: ProcessActivation failed", __func__);
3197}
3198
Finn Williams23b87b32019-07-30 11:44:05 +01003199template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003200 typename HalOperation = typename HalPolicy::Operation,
3201 typename HalModel = typename HalPolicy::Model>
3202bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003203{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003204 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003205
3206 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3207 if (!input.IsValid())
3208 {
3209 return Fail("%s: Operation has invalid inputs", __func__);
3210 }
3211
3212 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3213 unsigned int rank = inputInfo.GetNumDimensions();
3214 if (rank > 4)
3215 {
3216 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3217 }
3218
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003219 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003220 if (!output)
3221 {
3222 return Fail("%s: Could not read output 0", __func__);
3223 }
3224
3225 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3226 {
3227 return Fail("%s: Dynamic output tensors are not supported", __func__);
3228 }
3229
3230 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3231 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003232 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003233
3234 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3235
3236 std::vector<int32_t> axis;
3237 if (!axisOperand)
3238 {
3239 axis.assign(dimensionSequence,
3240 dimensionSequence + rank);
3241 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003242 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003243 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003244 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003245 }
3246
3247 std::vector<uint32_t> outputDims;
3248 for (unsigned int i = 0; i < rank; i++)
3249 {
3250 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3251 auto currentDimension = inputInfo.GetShape()[i];
3252 if (skipSqueeze || currentDimension != 1)
3253 {
3254 outputDims.push_back(currentDimension);
3255 }
3256 }
3257
3258 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3259
3260 armnn::TensorInfo outputInfo = inputInfo;
3261 outputInfo.SetShape(outShape);
3262
3263 armnn::ReshapeDescriptor reshapeDesc;
3264 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3265
3266 bool isSupported = false;
3267 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3268 IsReshapeSupported,
3269 data.m_Backends,
3270 isSupported,
3271 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003272 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003273 reshapeDesc);
3274 if (!isSupported)
3275 {
3276 return false;
3277 }
3278
3279 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3280 assert(layer != nullptr);
3281 input.Connect(layer->GetInputSlot(0));
3282
3283 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3284}
3285
3286template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003287 typename HalOperation = typename HalPolicy::Operation,
3288 typename HalModel = typename HalPolicy::Model>
3289bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003290{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003291 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003292
3293 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3294 if (!input.IsValid())
3295 {
3296 return Fail("%s: Operation has invalid inputs", __func__);
3297 }
3298
3299 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3300 unsigned int rank = inputInfo.GetNumDimensions();
3301 if (rank > 4)
3302 {
3303 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3304 }
3305
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003306 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003307 if (!output)
3308 {
3309 return Fail("%s: Could not read output 0", __func__);
3310 }
3311
3312 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3313 if (IsDynamicTensor(outputInfo))
3314 {
3315 return Fail("%s: Dynamic output tensors are not supported", __func__);
3316 }
3317
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003318 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3319 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3320 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003321
3322 std::vector<int32_t> beginValues;
3323 std::vector<int32_t> endValues;
3324 std::vector<int32_t> stridesValues;
3325
3326 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003327 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003328 {
3329 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3330 {
3331 return false;
3332 }
3333
3334 if (operandValues.size() != rank)
3335 {
3336 return false;
3337 }
3338
3339 return true;
3340 };
3341
3342 if (!ValidateInputOperands(*beginOperand, beginValues)
3343 || !ValidateInputOperands(*endOperand, endValues)
3344 || !ValidateInputOperands(*stridesOperand, stridesValues))
3345 {
3346 return Fail("%s: Operation has invalid input operand", __func__);
3347 }
3348
3349 // Stride cannot have value '0'
3350 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3351 {
3352 return Fail("%s: Stride must be non-zero value.", __func__);
3353 }
3354
3355 armnn::StridedSliceDescriptor descriptor;
3356 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3357 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3358 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3359 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3360
3361 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3362 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3363 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3364 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3365 {
3366 return Fail("%s: Operation has invalid inputs", __func__);
3367 }
3368
3369 bool isSupported = false;
3370 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3371 IsStridedSliceSupported,
3372 data.m_Backends,
3373 isSupported,
3374 inputInfo,
3375 outputInfo,
3376 descriptor);
3377 if (!isSupported)
3378 {
3379 return false;
3380 }
3381
3382 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3383 assert(layer != nullptr);
3384 input.Connect(layer->GetInputSlot(0));
3385
3386 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3387}
3388
3389template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003390 typename HalOperation = typename HalPolicy::Operation,
3391 typename HalModel = typename HalPolicy::Model>
3392bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003393{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003394 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003395
3396 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3397 if (!input.IsValid())
3398 {
3399 return Fail("%s: Operation has invalid inputs", __func__);
3400 }
3401
3402 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3403 unsigned int rank = inputInfo.GetNumDimensions();
3404 if (rank > 4)
3405 {
3406 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3407 }
3408
3409 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3410 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003411 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003412
3413 std::vector<int32_t> perm(rank);
3414 if (!permOperand)
3415 {
3416 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3417 for (unsigned int i = rank; i > 0; i--)
3418 {
3419 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3420 }
3421 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003422 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003423 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003424 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003425 }
3426
3427 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3428
Mike Kelly4a956582020-02-28 10:32:09 +00003429 armnn::TransposeDescriptor transposeDesc;
3430 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003431
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003432 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003433 if (!output)
3434 {
3435 return Fail("%s: Could not read output 0", __func__);
3436 }
3437
3438 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003439 if (IsDynamicTensor(outputInfo))
3440 {
3441 return Fail("%s: Dynamic output tensors are not supported", __func__);
3442 }
3443
Mike Kelly46272802019-08-14 17:00:48 +01003444
3445 bool isSupported = false;
3446 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00003447 IsTransposeSupported,
Mike Kelly46272802019-08-14 17:00:48 +01003448 data.m_Backends,
3449 isSupported,
3450 inputInfo,
3451 outputInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00003452 transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003453 if (!isSupported)
3454 {
3455 return false;
3456 }
3457
Mike Kelly4a956582020-02-28 10:32:09 +00003458 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003459 assert(layer != nullptr);
3460 input.Connect(layer->GetInputSlot(0));
3461
3462 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3463}
3464
3465template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003466 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003467 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003468 typename HalModel = typename HalPolicy::Model>
3469bool ConvertBatchToSpaceNd(const HalOperation& operation,
3470 const HalModel& model,
3471 ConversionData& data)
3472{
Finn Williams23b87b32019-07-30 11:44:05 +01003473
3474 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3475 if (!input.IsValid())
3476 {
3477 return Fail("%s: Operation has invalid inputs", __func__);
3478 }
3479
3480 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3481 if (!output)
3482 {
3483 return Fail("%s: Could not read output 0", __func__);
3484 }
3485
3486 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3487 if (IsDynamicTensor(outputInfo))
3488 {
3489 return Fail("%s: Dynamic output tensors are not supported", __func__);
3490 }
3491
3492 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3493 if (!blockOperand)
3494 {
3495 return Fail("%s: Could not read input 1", __func__);
3496 }
3497
3498 // Convert the block operand to int32
3499 std::vector<int32_t> block;
3500 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3501 {
3502 return Fail("%s: Input 1 has invalid values", __func__);
3503 }
3504
3505 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3506
3507 unsigned int rank = inputInfo.GetNumDimensions();
3508 if (rank != 4)
3509 {
3510 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3511 }
3512
3513 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3514 {
3515 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3516 " greater than or equal to 1", __func__);
3517 }
3518
3519 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3520 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3521 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3522
3523 if (Is12Operand(*output))
3524 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003525 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003526 }
3527 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3528 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3529
3530 bool isSupported = false;
3531 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3532 IsBatchToSpaceNdSupported,
3533 data.m_Backends,
3534 isSupported,
3535 inputInfo,
3536 outputInfo,
3537 batchToSpaceNdDesc);
3538 if (!isSupported)
3539 {
3540 return false;
3541 }
3542
3543 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3544 assert(layer != nullptr);
3545 input.Connect(layer->GetInputSlot(0));
3546
3547 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3548}
Mike Kelly0a879362019-07-29 16:56:31 +01003549
Finn Williamsd74c5052019-07-30 17:06:00 +01003550template<typename HalPolicy,
3551 typename HalOperation = typename HalPolicy::Operation,
3552 typename HalOperand = typename HalPolicy::Operand,
3553 typename HalModel = typename HalPolicy::Model>
3554bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3555{
3556 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3557 if (!input.IsValid())
3558 {
3559 return Fail("%s: Operation has invalid inputs", __func__);
3560 }
3561
3562 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3563 unsigned int rank = inputInfo.GetNumDimensions();
3564 unsigned int spatialDim = rank - 2;
3565
3566 if (rank != 4)
3567 {
3568 Fail("%s: Only inputs with rank 4 are supported", __func__);
3569 }
3570
3571 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3572 if (!output)
3573 {
3574 return Fail("%s: Could not read output 0", __func__);
3575 }
3576
3577 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3578 if (IsDynamicTensor(outputInfo))
3579 {
3580 return Fail("%s: Dynamic output tensors are not supported", __func__);
3581 }
3582
3583 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3584 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3585
3586 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3587 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3588 {
3589 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3590 }
3591
3592 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00003593 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
3594 {
3595 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
3596 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003597 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3598 {
3599 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3600 }
3601
3602 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3603 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3604 {
3605 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3606 }
3607
3608 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3609 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00003610 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
3611 {
3612 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
3613 }
Finn Williamsd74c5052019-07-30 17:06:00 +01003614 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3615 {
3616 int paddingBeforeInput = paddings[i];
3617 int paddingAfterInput = paddings[i + 1];
3618 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3619 {
3620 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3621 }
3622
3623 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3624 }
3625
3626 armnn::SpaceToBatchNdDescriptor descriptor;
3627 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3628 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3629 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3630
3631 if (Is12Operand(*output))
3632 {
3633 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3634 }
3635
3636 bool isSupported = false;
3637 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3638 IsSpaceToBatchNdSupported,
3639 data.m_Backends,
3640 isSupported,
3641 inputInfo,
3642 outputInfo,
3643 descriptor);
3644 if (!isSupported)
3645 {
3646 return false;
3647 }
3648
3649 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3650 assert(layer != nullptr);
3651 input.Connect(layer->GetInputSlot(0));
3652
3653 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3654}
3655
saoste01b8471482018-10-10 09:44:51 +01003656} // namespace armnn_driver